#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <trace/power.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>

unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;

DEFINE_TRACE(power_start);
DEFINE_TRACE(power_end);

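/*
 * The extended FPU/SSE state lives outside task_struct, so duplicating a
 * task must also duplicate thread.xstate. FXSAVE/FXRSTOR require the save
 * area to be 16-byte aligned, hence the alignment check below.
 */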
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        *dst = *src;
        if (src->thread.xstate) {
                dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
                                                      GFP_KERNEL);
                if (!dst->thread.xstate)
                        return -ENOMEM;
                WARN_ON((unsigned long)dst->thread.xstate & 15);
                memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
        }
        return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
        if (tsk->thread.xstate) {
                kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
                tsk->thread.xstate = NULL;
        }
}

void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}

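/*
 * xstate_size is only known at runtime (it depends on the CPU's feature
 * set), so this cache cannot be created statically. SLAB_PANIC makes the
 * boot fail loudly if the cache cannot be set up.
 */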
void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        unsigned long *bp = t->io_bitmap_ptr;

        if (bp) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }

        ds_exit_thread(current);
}

void flush_thread(void)
{
        struct task_struct *tsk = current;

#ifdef CONFIG_X86_64
        if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
                clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
                if (test_tsk_thread_flag(tsk, TIF_IA32)) {
                        clear_tsk_thread_flag(tsk, TIF_IA32);
                } else {
                        set_tsk_thread_flag(tsk, TIF_IA32);
                        current_thread_info()->status |= TS_COMPAT;
                }
        }
#endif

        clear_tsk_thread_flag(tsk, TIF_DEBUG);

        tsk->thread.debugreg0 = 0;
        tsk->thread.debugreg1 = 0;
        tsk->thread.debugreg2 = 0;
        tsk->thread.debugreg3 = 0;
        tsk->thread.debugreg6 = 0;
        tsk->thread.debugreg7 = 0;
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
}

static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

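/*
 * get_tsc_mode() above and set_tsc_mode() below back the PR_GET_TSC and
 * PR_SET_TSC prctl() commands. Illustrative userspace use:
 *
 *      prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);
 *
 * after which an rdtsc instruction in this task raises SIGSEGV instead
 * of reading the time stamp counter.
 */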
int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}

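/*
 * Handles the slow-path, non-register pieces of a context switch: the
 * BTS/DS area and debugctl MSR, the hardware debug registers, the
 * TIF_NOTSC (CR4.TSD) setting, and the I/O permission bitmap in the TSS.
 */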
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
            test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
                ds_switch_to(prev_p, next_p);
        else if (next->debugctlmsr != prev->debugctlmsr)
                update_debugctlmsr(next->debugctlmsr);

        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
                set_debugreg(next->debugreg0, 0);
                set_debugreg(next->debugreg1, 1);
                set_debugreg(next->debugreg2, 2);
                set_debugreg(next->debugreg3, 3);
                /* no 4 and 5 */
                set_debugreg(next->debugreg6, 6);
                set_debugreg(next->debugreg7, 7);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
}

int sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
                       NULL, NULL);
}


/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
        return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
        return 1;
}
#endif

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                struct power_trace it;

                trace_power_start(&it, POWER_CSTATE, 1);
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                trace_power_end(&it);
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

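/*
 * Park a CPU: used from IPI context when all CPUs are being stopped,
 * e.g. on halt, reboot or panic paths.
 */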
void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();

        for (;;) {
                if (hlt_works(smp_processor_id()))
                        halt();
        }
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
        struct power_trace it;

        trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
        if (!need_resched()) {
                if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
        trace_power_end(&it);
}

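/*
 * MWAIT hint encoding in EAX: bits 7:4 hold the target C-state minus one
 * and bits 3:0 a sub-state, which is why the tracer above reports
 * (ax >> 4) + 1. mwait_idle() below always uses hint 0, i.e. C1.
 */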
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        struct power_trace it;
        if (!need_resched()) {
                trace_power_start(&it, POWER_CSTATE, 1);
                if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_power_end(&it);
        } else
                local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        struct power_trace it;

        trace_power_start(&it, POWER_CSTATE, 0);
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_power_end(&it);
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;

#define MWAIT_INFO                      0x05
#define MWAIT_ECX_EXTENDED_INFO         0x01
#define MWAIT_EDX_C1                    0xf0

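/*
 * CPUID leaf 5 (MWAIT_INFO): ECX bit 0 says whether EDX enumerates the
 * supported sub-states, four bits per C-state. MWAIT_EDX_C1 (bits 7:4)
 * therefore tests whether C1 can be entered through MWAIT at all.
 */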
static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        if (force_mwait)
                return 1;

        if (c->cpuid_level < MWAIT_INFO)
                return 0;

        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
        /* Check whether EDX has extended info about MWAIT */
        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
                return 1;

        /*
         * EDX enumerates the MONITOR/MWAIT extensions. Check whether
         * C1 supports MWAIT.
         */
        return (edx & MWAIT_EDX_C1);
}

/*
 * Check for AMD CPUs, which have potentially C1E support
 */
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
        if (c->x86_vendor != X86_VENDOR_AMD)
                return 0;

        if (c->x86 < 0x0F)
                return 0;

        /* Family 0x0f models < rev F do not have C1E */
        if (c->x86 == 0x0f && c->x86_model < 0x40)
                return 0;

        return 1;
}

static cpumask_var_t c1e_mask;
static int c1e_detected;

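/*
 * c1e_mask is allocated lazily in init_c1e_mask(), so this must tolerate
 * being called (e.g. for a CPU going away early in boot) while the mask
 * is still NULL.
 */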
void c1e_remove_cpu(int cpu)
{
        if (c1e_mask != NULL)
                cpumask_clear_cpu(cpu, c1e_mask);
}

/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void c1e_idle(void)
{
        if (need_resched())
                return;

        if (!c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        c1e_detected = 1;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        printk(KERN_INFO "System has AMD C1E enabled\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
                }
        }

        if (c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpumask_test_cpu(cpu, c1e_mask)) {
                        cpumask_set_cpu(cpu, c1e_mask);
                        /*
                         * Force broadcast so ACPI cannot interfere. Needs
                         * to run with interrupts enabled as it uses
                         * smp_call_function().
                         */
                        local_irq_enable();
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                               cpu);
                        local_irq_disable();
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}

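/*
 * Called once per CPU during bringup; the first caller picks the global
 * pm_idle routine and later callers return early once pm_idle is set.
 */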
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
        }
#endif
        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => all CPUs support mwait
                 */
                printk(KERN_INFO "using mwait in idle threads.\n");
                pm_idle = mwait_idle;
        } else if (check_c1e_idle(c)) {
                printk(KERN_INFO "using C1E aware idle routine\n");
                pm_idle = c1e_idle;
        } else
                pm_idle = default_idle;
}

void __init init_c1e_mask(void)
{
        /* If we're using c1e_idle, we need to allocate c1e_mask. */
        if (pm_idle == c1e_idle) {
                alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
                cpumask_clear(c1e_mask);
        }
}

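/*
 * Parses the "idle=" boot parameter; the recognized values are "poll",
 * "mwait", "halt" and "nomwait", handled below.
 */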
static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else if (!strcmp(str, "halt")) {
                /*
                 * With idle=halt, HLT is forced as the idle routine,
                 * so the deeper C2/C3 states are never entered.
                 * boot_option_idle_override is left untouched so that
                 * the CPU idle driver can still be loaded.
                 */
                pm_idle = default_idle;
                idle_halt = 1;
                return 0;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * With idle=nomwait, mwait is disabled for the CPU
                 * C2/C3 states. boot_option_idle_override is likewise
                 * left untouched.
                 */
                idle_nomwait = 1;
                return 0;
        } else
                return -1;

        boot_option_idle_override = 1;
        return 0;
}
early_param("idle", idle_setup);