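/*
 * Common x86 process and idle management code, shared between the
 * 32-bit and 64-bit builds.
 */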
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/idle.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <trace/power.h>
#include <asm/system.h>
#include <asm/apic.h>

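/*
 * Set by the "idle=halt" and "idle=nomwait" boot options (see
 * idle_setup() below); exported so the ACPI processor idle driver can
 * adjust its C-state handling accordingly.
 */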
unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;

DEFINE_TRACE(power_start);
DEFINE_TRACE(power_end);

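/*
 * Duplicate the task's extended FPU state at fork time: the child gets
 * its own xstate buffer, copied from the parent's.
 */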
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;
	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		WARN_ON((unsigned long)dst->thread.xstate & 15);
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}
	return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}

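/*
 * Set up the slab cache that backs the per-task extended FPU state;
 * kmem_cache_create() provides the alignment the xstate area requires.
 */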
void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC, NULL);
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
	return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
	return 1;
}
#endif

/*
 * We use this if we don't have any better idle routine.
 */
void default_idle(void)
{
	if (hlt_use_halt()) {
		struct power_trace it;

		trace_power_start(&it, POWER_CSTATE, 1);
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		if (!need_resched())
			safe_halt();	/* enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
		trace_power_end(&it);
	} else {
		local_irq_enable();
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

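/*
 * Park the calling CPU: mark it offline, disable its local APIC and
 * halt (or spin) forever.  Used on the machine halt/shutdown path.
 */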
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	disable_local_APIC();

	for (;;) {
		if (hlt_works(smp_processor_id()))
			halt();
	}
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - ensure that all CPUs discard the old value of pm_idle
 * and pick up the new one.  Required when changing the pm_idle handler
 * on SMP systems.
 *
 * The caller must have changed pm_idle to the new value before the call.
 * The old pm_idle value will not be used by any CPU after this function
 * returns.
 */
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
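
/*
 * Illustrative usage sketch (not a call site in this file):
 *
 *	pm_idle = mwait_idle;
 *	cpu_idle_wait();
 *
 * After cpu_idle_wait() returns, no CPU is still executing the old
 * pm_idle handler.
 */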

/*
 * This uses the new MONITOR/MWAIT instructions, available on P4
 * processors with PNI, which can obviate the IPI used to trigger a
 * need_resched check.  We execute MONITOR against need_resched and
 * enter an optimized wait state through MWAIT.  Whenever someone
 * changes need_resched, we are woken up from MWAIT (without an IPI).
 *
 * Beginning with Core Duo processors, MWAIT can take hints based on
 * CPU capability.
 */
180void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
181{
Arjan van de Venf3f47a62008-11-23 16:49:58 -0800182 struct power_trace it;
183
184 trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
Peter Zijlstra7f424a82008-04-25 17:39:01 +0200185 if (!need_resched()) {
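		/*
		 * Flush the cache line MONITOR will arm first: on some
		 * CPUs the monitoring hardware can otherwise miss the
		 * wakeup store (CPU erratum workaround).
		 */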
		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(ax, cx);
	}
	trace_power_end(&it);
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	struct power_trace it;
	if (!need_resched()) {
		trace_power_start(&it, POWER_CSTATE, 1);
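		/* Same CLFLUSH-before-MONITOR erratum workaround as above. */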
		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_power_end(&it);
	} else
		local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive.  Use this option with caution.
 */
static void poll_idle(void)
{
	struct power_trace it;

	trace_power_start(&it, POWER_CSTATE, 0);
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_power_end(&it);
}

/*
 * mwait selection logic:
 *
 * Whether to use MWAIT depends on the CPU.  For AMD CPUs that support
 * MWAIT it is the wrong choice: family 0x10 and 0x11 CPUs enter C1 on
 * HLT, and the power savings then depend on a clock divisor and the
 * current P-state of the core.  If all cores of a processor are in
 * halt state (C1), the processor can enter the C1E (C1 enhanced)
 * state.  If mwait is used, this will never happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;

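/* CPUID leaf 0x05 enumerates the MONITOR/MWAIT capabilities. */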
#define MWAIT_INFO			0x05
#define MWAIT_ECX_EXTENDED_INFO		0x01
#define MWAIT_EDX_C1			0xf0

static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (force_mwait)
		return 1;

	if (c->cpuid_level < MWAIT_INFO)
		return 0;

	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
	/* Check whether EDX has extended info about MWAIT */
	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
		return 1;

	/*
	 * EDX enumerates the MONITOR/MWAIT extensions. Check whether
	 * C1 supports MWAIT.
	 */
	return (edx & MWAIT_EDX_C1);
}

/*
 * Check for AMD CPUs, which potentially have C1E support
 */
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_AMD)
		return 0;

	if (c->x86 < 0x0F)
		return 0;

	/* Family 0x0f models < rev F do not have C1E */
	if (c->x86 == 0x0f && c->x86_model < 0x40)
		return 0;

	return 1;
}

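/*
 * c1e_detected latches once any CPU observes C1E; c1e_mask tracks the
 * CPUs that have already been switched to broadcast mode.
 */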
static cpumask_t c1e_mask = CPU_MASK_NONE;
static int c1e_detected;

void c1e_remove_cpu(int cpu)
{
	cpu_clear(cpu, c1e_mask);
}

/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void c1e_idle(void)
{
	if (need_resched())
		return;

	if (!c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			c1e_detected = 1;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			printk(KERN_INFO "System has AMD C1E enabled\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
		}
	}

	if (c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpu_isset(cpu, c1e_mask)) {
			cpu_set(cpu, c1e_mask);
			/*
			 * Force broadcast so ACPI can not interfere. Needs
			 * to run with interrupts enabled as it uses
			 * smp_call_function.
			 */
			local_irq_enable();
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
			       cpu);
			local_irq_disable();
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}

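/*
 * Select the idle routine for this machine.  Once pm_idle has been
 * set, later calls leave the choice alone.
 */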
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
			" performance may degrade.\n");
	}
#endif
	if (pm_idle)
		return;

	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * One CPU supports mwait => all CPUs support mwait
		 */
		printk(KERN_INFO "using mwait in idle threads.\n");
		pm_idle = mwait_idle;
	} else if (check_c1e_idle(c)) {
		printk(KERN_INFO "using C1E aware idle routine\n");
		pm_idle = c1e_idle;
	} else
		pm_idle = default_idle;
}

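/*
 * Parse the "idle=" boot parameter: "poll", "mwait", "halt" and
 * "nomwait" are the recognized options.
 */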
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	} else if (!strcmp(str, "mwait"))
		force_mwait = 1;
	else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option idle=halt is given, halt is
		 * forced for CPU idle and the CPU C2/C3 states won't
		 * be used again.
		 * To let the CPU idle driver continue to load, don't
		 * touch boot_option_idle_override.
		 */
		pm_idle = default_idle;
		idle_halt = 1;
		return 0;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * When the boot option idle=nomwait is given, mwait is
		 * disabled for the CPU C2/C3 states.  In this case too,
		 * boot_option_idle_override is left untouched.
		 */
		idle_nomwait = 1;
		return 0;
	} else
		return -1;

	boot_option_idle_override = 1;
	return 0;
}
early_param("idle", idle_setup);