#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/debugreg.h>

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

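/*
 * Called when a task is duplicated at fork()/clone() time (via
 * dup_task_struct()): give the child its own lazily-allocated FPU state
 * so parent and child never share an xstate buffer.
 */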
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        int ret;

        *dst = *src;
        if (fpu_allocated(&src->thread.fpu)) {
                memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
                ret = fpu_alloc(&dst->thread.fpu);
                if (ret)
                        return ret;
                fpu_copy(&dst->thread.fpu, &src->thread.fpu);
        }
        return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
        fpu_free(&tsk->thread.fpu);
}

void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}

void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        unsigned long *bp = t->io_bitmap_ptr;

        if (bp) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }
}

void show_regs(struct pt_regs *regs)
{
        show_registers(regs);
        show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs));
}

void show_regs_common(void)
{
        const char *vendor, *product, *board;

        vendor = dmi_get_system_info(DMI_SYS_VENDOR);
        if (!vendor)
                vendor = "";
        product = dmi_get_system_info(DMI_PRODUCT_NAME);
        if (!product)
                product = "";

        /* Board Name is optional */
        board = dmi_get_system_info(DMI_BOARD_NAME);

        printk(KERN_CONT "\n");
        printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
                current->pid, current->comm, print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
        printk(KERN_CONT " ");
        printk(KERN_CONT "%s %s", vendor, product);
        if (board) {
                printk(KERN_CONT "/");
                printk(KERN_CONT "%s", board);
        }
        printk(KERN_CONT "\n");
}

void flush_thread(void)
{
        struct task_struct *tsk = current;

        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
}

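/*
 * TIF_NOTSC support: setting CR4.TSD makes RDTSC (and RDTSCP) privileged,
 * so a user-mode RDTSC then faults and the task gets a SIGSEGV.
 */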
static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}

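/*
 * get_tsc_mode()/set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC prctl()
 * calls.  A minimal sketch of the userspace side (assumes <sys/prctl.h>):
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	- rdtsc now raises SIGSEGV
 *	...
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE);	- rdtsc is allowed again
 */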
int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}

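/*
 * Called from __switch_to() when prev and next disagree on one of the
 * context-switch TIF flags; brings the per-CPU state (DEBUGCTL MSR,
 * CR4.TSD, the TSS I/O bitmap) in line with the incoming task and
 * propagates the user-return-notifier flag.
 */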
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
            test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
                unsigned long debugctl = get_debugctlmsr();

                debugctl &= ~DEBUGCTLMSR_BTF;
                if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
                        debugctl |= DEBUGCTLMSR_BTF;

                update_debugctlmsr(debugctl);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
        propagate_user_return_notify(prev_p, next_p);
}

int sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
                       NULL, NULL);
}

long
sys_clone(unsigned long clone_flags, unsigned long newsp,
          void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
        if (!newsp)
                newsp = regs->sp;
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This gets run with %si containing the
 * function to call, and %di containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.si = (unsigned long) fn;
        regs.di = (unsigned long) arg;

#ifdef CONFIG_X86_32
        regs.ds = __USER_DS;
        regs.es = __USER_DS;
        regs.fs = __KERNEL_PERCPU;
        regs.gs = __KERNEL_STACK_CANARY;
#else
        regs.ss = __KERNEL_DS;
#endif

        regs.orig_ax = -1;
        regs.ip = (unsigned long) kernel_thread_helper;
        regs.cs = __KERNEL_CS | get_kernel_rpl();
        regs.flags = X86_EFLAGS_IF | 0x2;

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
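
/*
 * A minimal usage sketch (my_worker is a hypothetical caller; most code
 * should go through the kthread_create()/kthread_run() wrappers rather
 * than bare kernel_thread()):
 *
 *	static int my_worker(void *data)
 *	{
 *		do_something(data);
 *		return 0;
 *	}
 *
 *	kernel_thread(my_worker, &my_data, CLONE_FS | CLONE_FILES | SIGCHLD);
 */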

/*
 * sys_execve() executes a new program.
 */
long sys_execve(const char __user *name,
                const char __user *const __user *argv,
                const char __user *const __user *envp, struct pt_regs *regs)
{
        long error;
        char *filename;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, regs);

#ifdef CONFIG_X86_32
        if (error == 0) {
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
        }
#endif

        putname(filename);
        return error;
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
        return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
        return 1;
}
#endif

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                trace_power_start(POWER_CSTATE, 1, smp_processor_id());
                trace_cpu_idle(1, smp_processor_id());
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                trace_power_end(smp_processor_id());
                trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();

        for (;;) {
                if (hlt_works(smp_processor_id()))
                        halt();
        }
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - ensure that all CPUs discard the old value of pm_idle
 * and pick up the new one.  Required when changing the pm_idle handler
 * on SMP systems.
 *
 * The caller must have changed pm_idle to the new value before the call.
 * The old pm_idle value will not be used by any CPU after this function
 * returns.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
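
/*
 * A minimal sketch of the documented calling convention (my_new_idle is
 * a hypothetical replacement handler):
 *
 *	pm_idle = my_new_idle;	- publish the new handler first
 *	cpu_idle_wait();	- then wait until no CPU runs the old one
 */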

/*
 * This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate the IPI otherwise needed to trigger a need_resched
 * check.  We execute MONITOR against need_resched and enter an optimized
 * wait state through MWAIT.  Whenever someone changes need_resched, we are
 * woken up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
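 *
 * Callers (e.g. the ACPI FFH C-state code) pass the target C-state hint
 * in ax and MWAIT extensions such as "break on interrupt" in cx.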
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
        if (!need_resched()) {
                if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        if (!need_resched()) {
                trace_power_start(POWER_CSTATE, 1, smp_processor_id());
                trace_cpu_idle(1, smp_processor_id());
                if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_power_end(smp_processor_id());
                trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
        } else
                local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        trace_power_start(POWER_CSTATE, 0, smp_processor_id());
        trace_cpu_idle(0, smp_processor_id());
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_power_end(smp_processor_id());
        trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Power savings
 * then depend on a clock divisor and the current P-state of the core. If
 * all cores of a processor are in the halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */

#define MWAIT_INFO                      0x05
#define MWAIT_ECX_EXTENDED_INFO         0x01
#define MWAIT_EDX_C1                    0xf0

int mwait_usable(const struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        if (boot_option_idle_override == IDLE_FORCE_MWAIT)
                return 1;

        if (c->cpuid_level < MWAIT_INFO)
                return 0;

        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
        /* Check whether EDX has extended info about MWAIT */
        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
                return 1;

        /*
         * edx enumerates MONITOR/MWAIT extensions. Check whether
         * C1 supports MWAIT.
         */
        return (edx & MWAIT_EDX_C1);
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
        if (amd_e400_c1e_mask != NULL)
                cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the
 * interrupt pending message MSR. If we detect C1E, then we handle it the
 * same way as C3 power states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
        if (need_resched())
                return;

        if (!amd_e400_c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        amd_e400_c1e_detected = true;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        printk(KERN_INFO "System has AMD C1E enabled\n");
                }
        }

        if (amd_e400_c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
                        cpumask_set_cpu(cpu, amd_e400_c1e_mask);
                        /*
                         * Force broadcast so ACPI can not interfere.
                         */
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                               cpu);
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
        }
#endif
        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => all CPUs support mwait
                 */
                printk(KERN_INFO "using mwait in idle threads.\n");
                pm_idle = mwait_idle;
        } else if (cpu_has_amd_erratum(amd_erratum_400)) {
                /* E400: APIC timer interrupt does not wake up CPU from C1e */
                printk(KERN_INFO "using AMD E400 aware idle routine\n");
                pm_idle = amd_e400_idle;
        } else
                pm_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
        /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
        if (pm_idle == amd_e400_idle)
                zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
                boot_option_idle_override = IDLE_POLL;
        } else if (!strcmp(str, "mwait")) {
                boot_option_idle_override = IDLE_FORCE_MWAIT;
        } else if (!strcmp(str, "halt")) {
                /*
                 * When the boot option idle=halt is given, HLT is
                 * forced for CPU idle and the C2/C3 states won't be
                 * used again.
                 */
                pm_idle = default_idle;
                boot_option_idle_override = IDLE_HALT;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * When the boot option idle=nomwait is given, MWAIT
                 * is disabled for the CPU C2/C3 states.
                 */
                boot_option_idle_override = IDLE_NOMWAIT;
        } else
                return -1;

        return 0;
}
early_param("idle", idle_setup);

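/*
 * Boot-time usage examples (kernel command line):
 *
 *	idle=poll	busy-wait instead of halting (fastest wakeup,
 *			highest power draw)
 *	idle=mwait	force MWAIT even where it would normally be avoided
 *	idle=halt	always use HLT; the C2/C3 states are not entered
 *	idle=nomwait	never use MWAIT for the C-states
 */

/*
 * Randomize the stack pointer at exec time: subtract up to 8 kB and keep
 * 16-byte alignment, but only when address-space randomization is enabled.
 */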
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
683