#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;

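/*
 * Note (added for orientation): init_tss is consumed below; exit_thread()
 * clears a departing task's IO bitmap bytes in per_cpu(init_tss, get_cpu()),
 * and the context-switch path hands the current CPU's entry to
 * __switch_to_xtra().
 */
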
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif
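
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): hook the chain above to learn when this CPU enters or leaves
 * idle. IDLE_START/IDLE_END are the actions from <asm/idle.h>;
 * my_idle_notify() and do_something() are made-up names.
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *unused)
 *	{
 *		if (action == IDLE_START)
 *			do_something();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_notify,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */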

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

/*
 * This is called so that we can store lazy FPU state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	int ret;

	*dst = *src;
	if (fpu_allocated(&src->thread.fpu)) {
		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
		ret = fpu_alloc(&dst->thread.fpu);
		if (ret)
			return ret;
		fpu_copy(dst, src);
	}
	return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
	fpu_free(&tsk->thread.fpu);
}

void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}

void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}
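
/*
 * Note (added for orientation): this "task_xstate" cache backs fpu_alloc()
 * in arch_dup_task_struct() above. xstate_size is established at boot from
 * the CPU's FPU/XSAVE feature set, which is why the cache cannot be sized
 * at compile time.
 */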

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	drop_fpu(me);
}

void show_regs_common(void)
{
	const char *vendor, *product, *board;

	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!vendor)
		vendor = "";
	product = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!product)
		product = "";

	/* Board Name is optional */
	board = dmi_get_system_info(DMI_BOARD_NAME);

	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
	       current->pid, current->comm, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version,
	       vendor, product,
	       board ? "/" : "",
	       board ? board : "");
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	drop_init_fpu(tsk);
	/*
	 * Free the FPU state for non-xsave platforms. It gets
	 * reallocated lazily at the first use.
	 */
	if (!use_eager_fpu())
		free_thread_xstate(tsk);
}

static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
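
/*
 * get_tsc_mode()/set_tsc_mode() implement the PR_GET_TSC/PR_SET_TSC
 * prctls dispatched from kernel/sys.c. Illustrative userspace sketch;
 * after the PR_SET_TSC call, a RDTSC in this task raises SIGSEGV:
 *
 *	#include <sys/prctl.h>
 *
 *	int mode;
 *	prctl(PR_GET_TSC, &mode);	reads back PR_TSC_ENABLE or
 *					PR_TSC_SIGSEGV
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);
 */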

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}
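
/*
 * The IO bitmap handling above is what backs the ioperm(2) syscall: a set
 * bit in tss->io_bitmap denies port access, which is why the clearing
 * paths memset() with 0xff. Illustrative userspace sketch (needs
 * CAP_SYS_RAWIO; the parallel-port address is just an example):
 *
 *	#include <sys/io.h>
 *
 *	if (ioperm(0x378, 3, 1) == 0)
 *		outb(0x00, 0x378);
 */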

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(pm_idle);
#endif

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
	this_cpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#endif

/*
 * The idle thread. There's no useful work to be done, so just try
 * to conserve power and have a low exit latency (i.e. sit in a
 * loop waiting for somebody to say that they'd like to reschedule).
 */
void cpu_idle(void)
{
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
	current_thread_info()->status |= TS_POLLING;

	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();

			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_touch_nmi();
			local_irq_disable();

			enter_idle();

			/* Don't trace irqs off for idle */
			stop_critical_timings();

			/* enter_idle() needs rcu for notifiers */
			rcu_idle_enter();

			if (cpuidle_idle_call())
				pm_idle();

			rcu_idle_exit();
			start_critical_timings();

			/*
			 * In many cases the interrupt that ended idle
			 * has already called exit_idle. But some idle
			 * loops can be woken up without interrupt.
			 */
			__exit_idle();
		}

		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * We use this if we don't have any better idle routine.
 */
void default_idle(void)
{
	trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();

	if (!need_resched())
		safe_halt();	/* enables interrupts racelessly */
	else
		local_irq_enable();
	current_thread_info()->status |= TS_POLLING;
	trace_power_end_rcuidle(smp_processor_id());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!pm_idle;

	pm_idle = default_idle;

	return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;)
		halt();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_power_end_rcuidle(smp_processor_id());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the
 * interrupt pending message MSR. If we detect C1E, then we handle it
 * the same way as C3 power states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
	if (need_resched())
		return;

	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/*
			 * Force broadcast so ACPI cannot interfere.
			 */
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}
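
/*
 * Sequence sketch of the workaround above: once C1E is detected, each CPU
 * is moved (once) into clockevents broadcast mode, and every subsequent
 * idle entry brackets the halt with BROADCAST_ENTER/BROADCAST_EXIT so a
 * globally running timer can deliver wakeups while the local APIC timer
 * and TSC are stopped.
 */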

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
	}
#endif
	if (pm_idle)
		return;

	if (cpu_has_amd_erratum(amd_erratum_400)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		pr_info("using AMD E400 aware idle routine\n");
		pm_idle = amd_e400_idle;
	} else
		pm_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (pm_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		pm_idle = poll_idle;
		boot_option_idle_override = IDLE_POLL;
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option idle=halt is given, halt is
		 * forced as the CPU idle routine, so the deeper
		 * C2/C3 states won't be entered.
		 */
		pm_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * When the boot option idle=nomwait is given, mwait
		 * is disabled for the CPU C2/C3 states; the idle
		 * routine itself (pm_idle) is left untouched.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
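
/*
 * Illustrative kernel command line usage of the option parsed above:
 *
 *	idle=poll	busy-wait in poll_idle() instead of halting
 *	idle=halt	force default_idle(); deeper C-states stay unused
 *	idle=nomwait	keep the default routine, but mwait is not used
 *			for the C2/C3 states
 */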

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
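
/*
 * Worked example (illustrative numbers): with sp = 0x7fffffffe000 and
 * get_random_int() % 8192 = 0x1388, arch_align_stack() returns
 * (0x7fffffffe000 - 0x1388) & ~0xf = 0x7fffffffcc70, i.e. the stack base
 * moves down by up to 8 KB and stays 16-byte aligned. arch_randomize_brk()
 * likewise picks a page-aligned heap start in [mm->brk, mm->brk + 32 MB).
 */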