#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/debugreg.h>

unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        int ret;

        *dst = *src;
        if (fpu_allocated(&src->thread.fpu)) {
                memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
                ret = fpu_alloc(&dst->thread.fpu);
                if (ret)
                        return ret;
                fpu_copy(&dst->thread.fpu, &src->thread.fpu);
        }
        return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
        fpu_free(&tsk->thread.fpu);
}

void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}

void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        unsigned long *bp = t->io_bitmap_ptr;

        if (bp) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }
}

void show_regs(struct pt_regs *regs)
{
        show_registers(regs);
        show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs),
                   regs->bp);
}

void show_regs_common(void)
{
        const char *board, *product;

        board = dmi_get_system_info(DMI_BOARD_NAME);
        if (!board)
                board = "";
        product = dmi_get_system_info(DMI_PRODUCT_NAME);
        if (!product)
                product = "";

        printk(KERN_CONT "\n");
        printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
                current->pid, current->comm, print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version, board, product);
}

void flush_thread(void)
{
        struct task_struct *tsk = current;

        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
}

static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}
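
/*
 * Userspace reaches the two helpers above via prctl(2). A minimal sketch
 * of a task disabling its own RDTSC access (hypothetical userspace
 * example, not part of this file):
 *
 *      #include <sys/prctl.h>
 *
 *      int mode;
 *      prctl(PR_GET_TSC, &mode);               // ends up in get_tsc_mode()
 *      prctl(PR_SET_TSC, PR_TSC_SIGSEGV);      // ends up in set_tsc_mode()
 *      // any RDTSC executed by this task now raises SIGSEGV
 */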

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
            test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
                unsigned long debugctl = get_debugctlmsr();

                debugctl &= ~DEBUGCTLMSR_BTF;
                if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
                        debugctl |= DEBUGCTLMSR_BTF;

                update_debugctlmsr(debugctl);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
        propagate_user_return_notify(prev_p, next_p);
}
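
/*
 * Note: __switch_to_xtra() is not called unconditionally. __switch_to()
 * invokes it only when prev and next disagree on the relevant TIF bits,
 * roughly (a sketch of the caller in process_32.c/process_64.c):
 *
 *      if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
 *                   task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
 *              __switch_to_xtra(prev_p, next_p, tss);
 */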

int sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
                       NULL, NULL);
}

long
sys_clone(unsigned long clone_flags, unsigned long newsp,
          void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
        if (!newsp)
                newsp = regs->sp;
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This gets run with %si containing the
 * function to call, and %di containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.si = (unsigned long) fn;
        regs.di = (unsigned long) arg;

#ifdef CONFIG_X86_32
        regs.ds = __USER_DS;
        regs.es = __USER_DS;
        regs.fs = __KERNEL_PERCPU;
        regs.gs = __KERNEL_STACK_CANARY;
#else
        regs.ss = __KERNEL_DS;
#endif

        regs.orig_ax = -1;
        regs.ip = (unsigned long) kernel_thread_helper;
        regs.cs = __KERNEL_CS | get_kernel_rpl();
        regs.flags = X86_EFLAGS_IF | 0x2;

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
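
/*
 * Hedged usage sketch: a caller hands kernel_thread() a worker function
 * and clone flags, e.g. (my_worker is a hypothetical example; most code
 * should prefer the kthread_create()/kthread_run() wrappers):
 *
 *      static int my_worker(void *data)
 *      {
 *              // do the work, then exit
 *              return 0;
 *      }
 *
 *      kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 */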

/*
 * sys_execve() executes a new program.
 */
long sys_execve(char __user *name, char __user * __user *argv,
                char __user * __user *envp, struct pt_regs *regs)
{
        long error;
        char *filename;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, regs);

#ifdef CONFIG_X86_32
        if (error == 0) {
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
        }
#endif

        putname(filename);
        return error;
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
        return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
        return 1;
}
#endif

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                trace_power_start(POWER_CSTATE, 1);
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();

        for (;;) {
                if (hlt_works(smp_processor_id()))
                        halt();
        }
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
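
/*
 * Sketch of the required ordering when swapping the idle handler
 * (hypothetical caller):
 *
 *      pm_idle = my_idle_routine;      // publish the new handler first
 *      cpu_idle_wait();                // ...then flush the old one off all CPUs
 */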

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate the IPI needed to trigger a need_resched check.
 * We execute MONITOR against need_resched and enter an optimized wait
 * state through MWAIT. Whenever someone changes need_resched, we would
 * be woken up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
        trace_power_start(POWER_CSTATE, (ax>>4)+1);
        if (!need_resched()) {
                if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
}
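
/*
 * The AX hint encodes the target C-state in bits 7:4 (hence the
 * "(ax>>4)+1" in the trace call above) and a sub-state in bits 3:0;
 * CX bit 0 asks MWAIT to break on an interrupt even with interrupts
 * masked. A C2 request would therefore look roughly like (hypothetical
 * values, assuming this encoding):
 *
 *      mwait_idle_with_hints(0x10, 0x01);
 */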

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        if (!need_resched()) {
                trace_power_start(POWER_CSTATE, 1);
                if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
        } else
                local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        trace_power_start(POWER_CSTATE, 0);
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_power_end(0);
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT, using MWAIT
 * for idle is the wrong choice. Family 0x10 and 0x11 CPUs will enter
 * C1 on HLT. Power savings then depend on a clock divisor and the
 * current Pstate of the core. If all cores of a processor are in halt
 * state (C1) the processor can enter the C1E (C1 enhanced) state. If
 * mwait is used this will never happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;

#define MWAIT_INFO                      0x05
#define MWAIT_ECX_EXTENDED_INFO         0x01
#define MWAIT_EDX_C1                    0xf0
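
/*
 * These constants decode CPUID leaf 0x05 (MONITOR/MWAIT): ECX bit 0
 * advertises the extended enumeration in EDX, and EDX bits 7:4 count
 * the MWAIT C1 sub-states; a non-zero count means C1 can be entered
 * via MWAIT.
 */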

static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        if (force_mwait)
                return 1;

        if (c->cpuid_level < MWAIT_INFO)
                return 0;

        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
        /* Check whether EDX has extended info about MWAIT */
        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
                return 1;

        /*
         * EDX enumerates MONITOR/MWAIT extensions. Check whether
         * C1 supports MWAIT.
         */
        return (edx & MWAIT_EDX_C1);
}

/*
 * Check for AMD CPUs, where the APIC timer interrupt does not wake up the
 * CPU from C1E. For more information see
 * - Erratum #400 for NPT family 0xf and family 0x10 CPUs
 * - Erratum #365 for family 0x11 (not affected because C1E is not in use)
 */
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
        u64 val;

        if (c->x86_vendor != X86_VENDOR_AMD)
                goto no_c1e_idle;

        /* Family 0x0f models < rev F do not have C1E */
        if (c->x86 == 0x0F && c->x86_model >= 0x40)
                return 1;

        if (c->x86 == 0x10) {
                /*
                 * check OSVW bit for CPUs that are not affected
                 * by erratum #400
                 */
                if (cpu_has(c, X86_FEATURE_OSVW)) {
                        rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
                        if (val >= 2) {
                                rdmsrl(MSR_AMD64_OSVW_STATUS, val);
                                if (!(val & BIT(1)))
                                        goto no_c1e_idle;
                        }
                }
                return 1;
        }

no_c1e_idle:
        return 0;
}

static cpumask_var_t c1e_mask;
static int c1e_detected;

void c1e_remove_cpu(int cpu)
{
        if (c1e_mask != NULL)
                cpumask_clear_cpu(cpu, c1e_mask);
}

/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void c1e_idle(void)
{
        if (need_resched())
                return;

        if (!c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        c1e_detected = 1;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        printk(KERN_INFO "System has AMD C1E enabled\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
                }
        }

        if (c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpumask_test_cpu(cpu, c1e_mask)) {
                        cpumask_set_cpu(cpu, c1e_mask);
                        /*
                         * Force broadcast so ACPI can not interfere.
                         */
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                               cpu);
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
        }
#endif
        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => all CPUs support mwait
                 */
                printk(KERN_INFO "using mwait in idle threads.\n");
                pm_idle = mwait_idle;
        } else if (check_c1e_idle(c)) {
                printk(KERN_INFO "using C1E aware idle routine\n");
                pm_idle = c1e_idle;
        } else
                pm_idle = default_idle;
}

void __init init_c1e_mask(void)
{
        /* If we're using c1e_idle, we need to allocate c1e_mask. */
        if (pm_idle == c1e_idle)
                zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else if (!strcmp(str, "halt")) {
                /*
                 * With the boot option idle=halt, HLT is forced for
                 * CPU idle and the C2/C3 states won't be entered.
                 * Leave boot_option_idle_override untouched so the
                 * CPU idle driver can still be loaded.
                 */
                pm_idle = default_idle;
                idle_halt = 1;
                return 0;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * The boot option idle=nomwait disables MWAIT for the
                 * CPU C2/C3 states, again without touching
                 * boot_option_idle_override.
                 */
                idle_nomwait = 1;
                return 0;
        } else
                return -1;

        boot_option_idle_override = 1;
        return 0;
}
early_param("idle", idle_setup);
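
/*
 * Summary of the resulting kernel command line usage:
 *
 *      idle=poll       busy-poll in the idle loop (no HLT)
 *      idle=mwait      force MWAIT even where it is unwise (see above)
 *      idle=halt       force HLT; C2/C3 states are not entered
 *      idle=nomwait    keep MWAIT out of C2/C3 entry
 */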

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}
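
/*
 * Example: the stack pointer is lowered by a random 0..8191 bytes and
 * then rounded down, so the result is 16-byte aligned and at most 8 KB
 * below the original sp.
 */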

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}