blob: 429be896a030478b7345ba36902add9cd01f70ff [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * processor_idle - idle state submodule to the ACPI processor driver
3 *
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
Dominik Brodowskic5ab81c2006-06-24 19:37:00 -04006 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8 * - Added processor hotplug support
Venkatesh Pallipadi02df8b92005-04-15 15:07:10 -04009 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
10 * - Added support for C3 on SMP
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 *
12 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or (at
17 * your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful, but
20 * WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
27 *
28 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/init.h>
34#include <linux/cpufreq.h>
35#include <linux/proc_fs.h>
36#include <linux/seq_file.h>
37#include <linux/acpi.h>
38#include <linux/dmi.h>
39#include <linux/moduleparam.h>
Tim Schmielau4e57b682005-10-30 15:03:48 -080040#include <linux/sched.h> /* need_resched() */
Mark Grossf011e2e2008-02-04 22:30:09 -080041#include <linux/pm_qos_params.h>
Thomas Gleixnere9e2cdb2007-02-16 01:28:04 -080042#include <linux/clockchips.h>
Len Brown4f86d3a2007-10-03 18:58:00 -040043#include <linux/cpuidle.h>
Russell Kingba84be22009-01-06 14:41:07 -080044#include <linux/irqflags.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Thomas Gleixner34349332007-02-16 01:27:54 -080046/*
47 * Include the apic definitions for x86 to have the APIC timer related defines
48 * available also for UP (on SMP it gets magically included via linux/smp.h).
49 * asm/acpi.h is not an option, as it would require more include magic. Also
50 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
51 */
52#ifdef CONFIG_X86
53#include <asm/apic.h>
54#endif
55
Linus Torvalds1da177e2005-04-16 15:20:36 -070056#include <asm/io.h>
57#include <asm/uaccess.h>
58
59#include <acpi/acpi_bus.h>
60#include <acpi/processor.h>
Zhao Yakuic1e3b372008-06-24 17:58:53 +080061#include <asm/processor.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
Linus Torvalds1da177e2005-04-16 15:20:36 -070063#define ACPI_PROCESSOR_CLASS "processor"
Linus Torvalds1da177e2005-04-16 15:20:36 -070064#define _COMPONENT ACPI_PROCESSOR_COMPONENT
Len Brownf52fd662007-02-12 22:42:12 -050065ACPI_MODULE_NAME("processor_idle");
Linus Torvalds1da177e2005-04-16 15:20:36 -070066#define ACPI_PROCESSOR_FILE_POWER "power"
Ingo Molnar2aa44d02007-08-23 15:18:02 +020067#define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY)
Len Brown4f86d3a2007-10-03 18:58:00 -040068#define C2_OVERHEAD 1 /* 1us */
69#define C3_OVERHEAD 1 /* 1us */
Len Brown4f86d3a2007-10-03 18:58:00 -040070#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
Linus Torvalds1da177e2005-04-16 15:20:36 -070071
Len Brown4f86d3a2007-10-03 18:58:00 -040072static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
73module_param(max_cstate, uint, 0000);
Andreas Mohrb6835052006-04-27 05:25:00 -040074static unsigned int nocst __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -070075module_param(nocst, uint, 0000);
76
Len Brown25de5712007-12-14 00:24:15 -050077static unsigned int latency_factor __read_mostly = 2;
Len Brown4963f622007-12-13 23:50:45 -050078module_param(latency_factor, uint, 0644);
Linus Torvalds1da177e2005-04-16 15:20:36 -070079
alex.shiff69f2b2009-03-04 11:55:26 -080080static s64 us_to_pm_timer_ticks(s64 t)
81{
82 return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
83}
Linus Torvalds1da177e2005-04-16 15:20:36 -070084/*
85 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
86 * For now disable this. Probably a bug somewhere else.
87 *
88 * To skip this limit, boot/load with a large max_cstate limit.
89 */
Jeff Garzik18552562007-10-03 15:15:40 -040090static int set_max_cstate(const struct dmi_system_id *id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070091{
92 if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
93 return 0;
94
Len Brown3d356002005-08-03 00:22:52 -040095 printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
Len Brown4be44fc2005-08-05 00:44:28 -040096 " Override with \"processor.max_cstate=%d\"\n", id->ident,
97 (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070098
Len Brown3d356002005-08-03 00:22:52 -040099 max_cstate = (long)id->driver_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100
101 return 0;
102}
103
/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
/* DMI quirk table: machines whose BIOS misbehaves in deep C-states.
 * driver_data is the C-state limit passed to set_max_cstate(). */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	/* Clevo 5600D with this Phoenix BIOS build: limit to C2 */
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};
113
Len Brown4f86d3a2007-10-03 18:58:00 -0400114
/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	/* Clear TS_POLLING so the scheduler will send a wakeup IPI
	 * instead of assuming this CPU is polling need_resched. */
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		/* safe_halt() left interrupts enabled; re-disable to
		 * restore the IRQ-off state the caller expects. */
		local_irq_disable();
	}
	/* Restore polling status for the idle loop. */
	current_thread_info()->status |= TS_POLLING;
}
133
Thomas Gleixner169a0ab2007-02-16 01:27:55 -0800134#ifdef ARCH_APICTIMER_STOPS_ON_C3
135
136/*
137 * Some BIOS implementations switch to C3 in the published C2 state.
Linus Torvalds296d93c2007-03-23 08:03:47 -0700138 * This seems to be a common problem on AMD boxen, but other vendors
139 * are affected too. We pick the most conservative approach: we assume
140 * that the local APIC stops in both C2 and C3.
Thomas Gleixner169a0ab2007-02-16 01:27:55 -0800141 */
142static void acpi_timer_check_state(int state, struct acpi_processor *pr,
143 struct acpi_processor_cx *cx)
144{
145 struct acpi_processor_power *pwr = &pr->power;
Thomas Gleixnere585bef2007-03-23 16:08:01 +0100146 u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
Thomas Gleixner169a0ab2007-02-16 01:27:55 -0800147
148 /*
149 * Check, if one of the previous states already marked the lapic
150 * unstable
151 */
152 if (pwr->timer_broadcast_on_state < state)
153 return;
154
Thomas Gleixnere585bef2007-03-23 16:08:01 +0100155 if (cx->type >= type)
Linus Torvalds296d93c2007-03-23 08:03:47 -0700156 pr->power.timer_broadcast_on_state = state;
Thomas Gleixner169a0ab2007-02-16 01:27:55 -0800157}
158
159static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
160{
Thomas Gleixnere9e2cdb2007-02-16 01:28:04 -0800161 unsigned long reason;
162
163 reason = pr->power.timer_broadcast_on_state < INT_MAX ?
164 CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
165
166 clockevents_notify(reason, &pr->id);
Thomas Gleixnere9e2cdb2007-02-16 01:28:04 -0800167}
168
169/* Power(C) State timer broadcast control */
170static void acpi_state_timer_broadcast(struct acpi_processor *pr,
171 struct acpi_processor_cx *cx,
172 int broadcast)
173{
Thomas Gleixnere9e2cdb2007-02-16 01:28:04 -0800174 int state = cx - pr->power.states;
175
176 if (state >= pr->power.timer_broadcast_on_state) {
177 unsigned long reason;
178
179 reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
180 CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
181 clockevents_notify(reason, &pr->id);
182 }
Thomas Gleixner169a0ab2007-02-16 01:27:55 -0800183}
184
185#else
186
/* No-op stubs for configurations where ARCH_APICTIMER_STOPS_ON_C3 is
 * not defined: the local APIC timer keeps ticking, so no broadcast
 * bookkeeping is needed. */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}
Thomas Gleixner169a0ab2007-02-16 01:27:55 -0800195
196#endif
197
/*
 * Suspend / resume control
 */
/* Nonzero while a system suspend is in progress; presumably consulted
 * by the idle entry path to avoid deep C-states during suspend — the
 * reader is outside this chunk, confirm against the idle handlers. */
static int acpi_idle_suspend;

/* ACPI device suspend hook: flag that idle must stay shallow. */
int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

/* ACPI device resume hook: re-enable normal idle behavior. */
int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}
214
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
/* Return nonzero when the TSC is expected to stop counting in the
 * given C-state on this CPU. */
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return 0;	/* nonstop TSC: never halts */

		/* Without NONSTOP_TSC, fall back to the generic rule. */
		/*FALL THROUGH*/
	default:
		/* Conservative default: TSC halts in C2 and deeper. */
		return state > ACPI_STATE_C1;
	}
}
#endif
234
Len Brown4be44fc2005-08-05 00:44:28 -0400235static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700236{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237
238 if (!pr)
Patrick Mocheld550d982006-06-27 00:41:40 -0400239 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700240
241 if (!pr->pblk)
Patrick Mocheld550d982006-06-27 00:41:40 -0400242 return -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700243
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244 /* if info is obtained from pblk/fadt, type equals state */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700245 pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
246 pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
247
Venkatesh Pallipadi4c033552005-09-15 12:20:00 -0400248#ifndef CONFIG_HOTPLUG_CPU
249 /*
250 * Check for P_LVL2_UP flag before entering C2 and above on
Len Brown4f86d3a2007-10-03 18:58:00 -0400251 * an SMP system.
Venkatesh Pallipadi4c033552005-09-15 12:20:00 -0400252 */
Alexey Starikovskiyad718602007-02-02 19:48:19 +0300253 if ((num_online_cpus() > 1) &&
Alexey Starikovskiycee324b2007-02-02 19:48:22 +0300254 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
Patrick Mocheld550d982006-06-27 00:41:40 -0400255 return -ENODEV;
Venkatesh Pallipadi4c033552005-09-15 12:20:00 -0400256#endif
257
Linus Torvalds1da177e2005-04-16 15:20:36 -0700258 /* determine C2 and C3 address from pblk */
259 pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
260 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
261
262 /* determine latencies from FADT */
Alexey Starikovskiycee324b2007-02-02 19:48:22 +0300263 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
264 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700265
266 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
267 "lvl2[0x%08x] lvl3[0x%08x]\n",
268 pr->power.states[ACPI_STATE_C2].address,
269 pr->power.states[ACPI_STATE_C3].address));
270
Patrick Mocheld550d982006-06-27 00:41:40 -0400271 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272}
273
Venkatesh Pallipadi991528d2006-09-25 16:28:13 -0700274static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
Venkatesh Pallipadiacf05f42005-03-31 23:23:15 -0500275{
Venkatesh Pallipadi991528d2006-09-25 16:28:13 -0700276 if (!pr->power.states[ACPI_STATE_C1].valid) {
277 /* set the first C-State to C1 */
278 /* all processors need to support C1 */
279 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
280 pr->power.states[ACPI_STATE_C1].valid = 1;
Venkatesh Pallipadi0fda6b42008-04-09 21:31:46 -0400281 pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
Venkatesh Pallipadi991528d2006-09-25 16:28:13 -0700282 }
283 /* the C0 state only exists as a filler in our array */
Venkatesh Pallipadiacf05f42005-03-31 23:23:15 -0500284 pr->power.states[ACPI_STATE_C0].valid = 1;
Patrick Mocheld550d982006-06-27 00:41:40 -0400285 return 0;
Venkatesh Pallipadiacf05f42005-03-31 23:23:15 -0500286}
287
/*
 * Evaluate the _CST object and fill pr->power.states[1..] with the
 * C-states it describes.  Returns 0 on success, -ENODEV when _CST is
 * absent or disabled via the "nocst" parameter, -EFAULT on a malformed
 * or too-small package.  Malformed individual entries are skipped.
 */
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;


	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	/* Element 0 is the declared number of C-state sub-packages. */
	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	/* Each sub-package is {register buffer, type, latency, power}. */
	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		/* Only SystemIO and FixedHardware entry methods are usable. */
		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
			    (pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (idle_halt || idle_nomwait)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		/* C1 is always usable regardless of latency/power fields. */
		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}
454
Linus Torvalds1da177e2005-04-16 15:20:36 -0700455static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
456{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700457
458 if (!cx->address)
Patrick Mocheld550d982006-06-27 00:41:40 -0400459 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700460
461 /*
462 * C2 latency must be less than or equal to 100
463 * microseconds.
464 */
465 else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
466 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
Len Brown4be44fc2005-08-05 00:44:28 -0400467 "latency too large [%d]\n", cx->latency));
Patrick Mocheld550d982006-06-27 00:41:40 -0400468 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700469 }
470
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471 /*
472 * Otherwise we've met all of our C2 requirements.
473 * Normalize the C2 latency to expidite policy
474 */
475 cx->valid = 1;
Len Brown4f86d3a2007-10-03 18:58:00 -0400476
Len Brown4f86d3a2007-10-03 18:58:00 -0400477 cx->latency_ticks = cx->latency;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700478
Patrick Mocheld550d982006-06-27 00:41:40 -0400479 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700480}
481
/*
 * Validate a discovered C3 state against latency, chipset errata and
 * bus-master handling requirements; mark it usable when all pass.
 */
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	/* Cached result of the first CPU's bm_check probe.
	 * NOTE(review): if the first probe yields bm_check == 0 the flag
	 * stays 0 and every later CPU re-probes — looks unintended but
	 * harmless as long as all CPUs agree; confirm. */
	static int bm_check_flag;


	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported on when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expidite policy. Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

	cx->latency_ticks = cx->latency;
	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3. Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}
570
Linus Torvalds1da177e2005-04-16 15:20:36 -0700571static int acpi_processor_power_verify(struct acpi_processor *pr)
572{
573 unsigned int i;
574 unsigned int working = 0;
Venkatesh Pallipadi6eb0a0f2006-01-11 22:44:21 +0100575
Thomas Gleixner169a0ab2007-02-16 01:27:55 -0800576 pr->power.timer_broadcast_on_state = INT_MAX;
Venkatesh Pallipadi6eb0a0f2006-01-11 22:44:21 +0100577
Len Brown4be44fc2005-08-05 00:44:28 -0400578 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700579 struct acpi_processor_cx *cx = &pr->power.states[i];
580
581 switch (cx->type) {
582 case ACPI_STATE_C1:
583 cx->valid = 1;
584 break;
585
586 case ACPI_STATE_C2:
587 acpi_processor_power_verify_c2(cx);
Linus Torvalds296d93c2007-03-23 08:03:47 -0700588 if (cx->valid)
Thomas Gleixner169a0ab2007-02-16 01:27:55 -0800589 acpi_timer_check_state(i, pr, cx);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700590 break;
591
592 case ACPI_STATE_C3:
593 acpi_processor_power_verify_c3(pr, cx);
Linus Torvalds296d93c2007-03-23 08:03:47 -0700594 if (cx->valid)
Thomas Gleixner169a0ab2007-02-16 01:27:55 -0800595 acpi_timer_check_state(i, pr, cx);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596 break;
597 }
598
599 if (cx->valid)
600 working++;
601 }
602
Thomas Gleixner169a0ab2007-02-16 01:27:55 -0800603 acpi_propagate_timer_broadcast(pr);
Andi Kleenbd663342006-03-25 16:31:07 +0100604
Linus Torvalds1da177e2005-04-16 15:20:36 -0700605 return (working);
606}
607
Len Brown4be44fc2005-08-05 00:44:28 -0400608static int acpi_processor_get_power_info(struct acpi_processor *pr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609{
610 unsigned int i;
611 int result;
612
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613
614 /* NOTE: the idle thread may not be running while calling
615 * this function */
616
Venkatesh Pallipadi991528d2006-09-25 16:28:13 -0700617 /* Zero initialize all the C-states info. */
618 memset(pr->power.states, 0, sizeof(pr->power.states));
619
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620 result = acpi_processor_get_power_info_cst(pr);
Venkatesh Pallipadi6d93c642005-09-15 12:19:00 -0400621 if (result == -ENODEV)
Darrick J. Wongc5a114f2006-10-19 23:28:28 -0700622 result = acpi_processor_get_power_info_fadt(pr);
Venkatesh Pallipadi6d93c642005-09-15 12:19:00 -0400623
Venkatesh Pallipadi991528d2006-09-25 16:28:13 -0700624 if (result)
625 return result;
626
627 acpi_processor_get_power_info_default(pr);
628
Janosch Machowinskicf824782005-08-20 08:02:00 -0400629 pr->power.count = acpi_processor_power_verify(pr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700630
Linus Torvalds1da177e2005-04-16 15:20:36 -0700631 /*
632 * if one state of type C2 or C3 is available, mark this
633 * CPU as being "idle manageable"
634 */
635 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
Venkatesh Pallipadiacf05f42005-03-31 23:23:15 -0500636 if (pr->power.states[i].valid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700637 pr->power.count = i;
Linus Torvalds2203d6e2005-11-18 07:29:51 -0800638 if (pr->power.states[i].type >= ACPI_STATE_C2)
639 pr->flags.power = 1;
Venkatesh Pallipadiacf05f42005-03-31 23:23:15 -0500640 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700641 }
642
Patrick Mocheld550d982006-06-27 00:41:40 -0400643 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700644}
645
/*
 * /proc seq_file show handler: dump the processor's C-state table,
 * current state, bus-master activity and the PM QoS latency bound.
 */
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;


	if (!pr)
		goto end;

	/* Header: active state, limit, BM activity, allowed latency. */
	seq_printf(seq, "active state: C%zd\n"
		   "max_cstate: C%d\n"
		   "bus master activity: %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	/* One line per state; '*' marks the currently active state. */
	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, " %cC%d: ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		/* Promotion/demotion targets are printed as state indices. */
		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}
713
714static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
715{
716 return single_open(file, acpi_processor_power_seq_show,
Len Brown4be44fc2005-08-05 00:44:28 -0400717 PDE(inode)->data);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718}
719
/*
 * File operations for /proc/acpi/processor/.../power.  The file is a
 * read-only single_open() seq_file; all real work happens in
 * acpi_processor_power_seq_show().
 */
static const struct file_operations acpi_processor_power_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
727
Len Brown4f86d3a2007-10-03 18:58:00 -0400728
729/**
730 * acpi_idle_bm_check - checks if bus master activity was detected
731 */
732static int acpi_idle_bm_check(void)
733{
734 u32 bm_status = 0;
735
Bob Moore50ffba12009-02-23 15:02:07 +0800736 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
Len Brown4f86d3a2007-10-03 18:58:00 -0400737 if (bm_status)
Bob Moore50ffba12009-02-23 15:02:07 +0800738 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
Len Brown4f86d3a2007-10-03 18:58:00 -0400739 /*
740 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
741 * the true state of bus mastering activity; forcing us to
742 * manually check the BMIDEA bit of each IDE channel.
743 */
744 else if (errata.piix4.bmisx) {
745 if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
746 || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
747 bm_status = 1;
748 }
749 return bm_status;
750}
751
752/**
Len Brown4f86d3a2007-10-03 18:58:00 -0400753 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
754 * @cx: cstate data
venkatesh.pallipadi@intel.combc71bec2008-01-31 17:35:04 -0800755 *
756 * Caller disables interrupt before call and enables interrupt after return.
Len Brown4f86d3a2007-10-03 18:58:00 -0400757 */
758static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
759{
Ingo Molnar95fd4842009-02-11 09:22:04 +0100760 u64 perf_flags;
761
Steven Rostedtdcf30992008-07-25 18:00:42 -0400762 /* Don't trace irqs off for idle */
763 stop_critical_timings();
Ingo Molnar95fd4842009-02-11 09:22:04 +0100764 perf_flags = hw_perf_save_disable();
venkatesh.pallipadi@intel.combc71bec2008-01-31 17:35:04 -0800765 if (cx->entry_method == ACPI_CSTATE_FFH) {
Len Brown4f86d3a2007-10-03 18:58:00 -0400766 /* Call into architectural FFH based C-state */
767 acpi_processor_ffh_cstate_enter(cx);
venkatesh.pallipadi@intel.combc71bec2008-01-31 17:35:04 -0800768 } else if (cx->entry_method == ACPI_CSTATE_HALT) {
769 acpi_safe_halt();
Len Brown4f86d3a2007-10-03 18:58:00 -0400770 } else {
771 int unused;
772 /* IO port based C-state */
773 inb(cx->address);
774 /* Dummy wait op - must do something useless after P_LVL2 read
775 because chipsets cannot guarantee that STPCLK# signal
776 gets asserted in time to freeze execution properly. */
777 unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
778 }
Ingo Molnar95fd4842009-02-11 09:22:04 +0100779 hw_perf_restore(perf_flags);
Steven Rostedtdcf30992008-07-25 18:00:42 -0400780 start_critical_timings();
Len Brown4f86d3a2007-10-03 18:58:00 -0400781}
782
783/**
784 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
785 * @dev: the target CPU
786 * @state: the state data
787 *
788 * This is equivalent to the HALT instruction.
789 */
790static int acpi_idle_enter_c1(struct cpuidle_device *dev,
791 struct cpuidle_state *state)
792{
alex.shiff69f2b2009-03-04 11:55:26 -0800793 ktime_t kt1, kt2;
794 s64 idle_time;
Len Brown4f86d3a2007-10-03 18:58:00 -0400795 struct acpi_processor *pr;
796 struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
venkatesh.pallipadi@intel.com9b12e182008-01-31 17:35:05 -0800797
Mike Travis706546d2008-06-09 16:22:23 -0700798 pr = __get_cpu_var(processors);
Len Brown4f86d3a2007-10-03 18:58:00 -0400799
800 if (unlikely(!pr))
801 return 0;
802
venkatesh.pallipadi@intel.com2e906652008-01-31 17:35:03 -0800803 local_irq_disable();
Venkatesh Pallipadib077fba2008-02-11 15:20:27 -0800804
805 /* Do not access any ACPI IO ports in suspend path */
806 if (acpi_idle_suspend) {
807 acpi_safe_halt();
808 local_irq_enable();
809 return 0;
810 }
811
alex.shiff69f2b2009-03-04 11:55:26 -0800812 kt1 = ktime_get_real();
venkatesh.pallipadi@intel.combc71bec2008-01-31 17:35:04 -0800813 acpi_idle_do_entry(cx);
alex.shiff69f2b2009-03-04 11:55:26 -0800814 kt2 = ktime_get_real();
815 idle_time = ktime_to_us(ktime_sub(kt2, kt1));
Len Brown4f86d3a2007-10-03 18:58:00 -0400816
venkatesh.pallipadi@intel.com2e906652008-01-31 17:35:03 -0800817 local_irq_enable();
Len Brown4f86d3a2007-10-03 18:58:00 -0400818 cx->usage++;
819
alex.shiff69f2b2009-03-04 11:55:26 -0800820 return idle_time;
Len Brown4f86d3a2007-10-03 18:58:00 -0400821}
822
823/**
824 * acpi_idle_enter_simple - enters an ACPI state without BM handling
825 * @dev: the target CPU
826 * @state: the state data
827 */
828static int acpi_idle_enter_simple(struct cpuidle_device *dev,
829 struct cpuidle_state *state)
830{
831 struct acpi_processor *pr;
832 struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
alex.shiff69f2b2009-03-04 11:55:26 -0800833 ktime_t kt1, kt2;
834 s64 idle_time;
835 s64 sleep_ticks = 0;
Venkatesh Pallipadi50629112007-11-19 19:49:00 -0500836
Mike Travis706546d2008-06-09 16:22:23 -0700837 pr = __get_cpu_var(processors);
Len Brown4f86d3a2007-10-03 18:58:00 -0400838
839 if (unlikely(!pr))
840 return 0;
841
Len Browne1964412007-10-04 01:23:47 -0400842 if (acpi_idle_suspend)
843 return(acpi_idle_enter_c1(dev, state));
844
Len Brown4f86d3a2007-10-03 18:58:00 -0400845 local_irq_disable();
846 current_thread_info()->status &= ~TS_POLLING;
847 /*
848 * TS_POLLING-cleared state must be visible before we test
849 * NEED_RESCHED:
850 */
851 smp_mb();
852
853 if (unlikely(need_resched())) {
854 current_thread_info()->status |= TS_POLLING;
855 local_irq_enable();
856 return 0;
857 }
858
Thomas Gleixnere17bcb42007-12-07 19:16:17 +0100859 /*
860 * Must be done before busmaster disable as we might need to
861 * access HPET !
862 */
863 acpi_state_timer_broadcast(pr, cx, 1);
864
Len Brown4f86d3a2007-10-03 18:58:00 -0400865 if (cx->type == ACPI_STATE_C3)
866 ACPI_FLUSH_CPU_CACHE();
867
alex.shiff69f2b2009-03-04 11:55:26 -0800868 kt1 = ktime_get_real();
Venkatesh Pallipadi50629112007-11-19 19:49:00 -0500869 /* Tell the scheduler that we are going deep-idle: */
870 sched_clock_idle_sleep_event();
Len Brown4f86d3a2007-10-03 18:58:00 -0400871 acpi_idle_do_entry(cx);
alex.shiff69f2b2009-03-04 11:55:26 -0800872 kt2 = ktime_get_real();
873 idle_time = ktime_to_us(ktime_sub(kt2, kt1));
Len Brown4f86d3a2007-10-03 18:58:00 -0400874
Pavel Machek61331162008-02-19 11:00:29 +0100875#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
Len Brown4f86d3a2007-10-03 18:58:00 -0400876 /* TSC could halt in idle, so notify users */
Andi Kleenddb25f92008-01-30 13:32:41 +0100877 if (tsc_halts_in_c(cx->type))
878 mark_tsc_unstable("TSC halts in idle");;
Len Brown4f86d3a2007-10-03 18:58:00 -0400879#endif
alex.shiff69f2b2009-03-04 11:55:26 -0800880 sleep_ticks = us_to_pm_timer_ticks(idle_time);
Venkatesh Pallipadi50629112007-11-19 19:49:00 -0500881
882 /* Tell the scheduler how much we idled: */
883 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
Len Brown4f86d3a2007-10-03 18:58:00 -0400884
885 local_irq_enable();
886 current_thread_info()->status |= TS_POLLING;
887
888 cx->usage++;
889
890 acpi_state_timer_broadcast(pr, cx, 0);
Venkatesh Pallipadi50629112007-11-19 19:49:00 -0500891 cx->time += sleep_ticks;
alex.shiff69f2b2009-03-04 11:55:26 -0800892 return idle_time;
Len Brown4f86d3a2007-10-03 18:58:00 -0400893}
894
/*
 * Number of CPUs currently inside a C3 entry; protected by c3_lock.
 * Bus-master arbitration is disabled only once every online CPU is
 * counted here (see acpi_idle_enter_bm()).
 */
static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);
897
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 * Returns the measured idle residency in microseconds, or 0 when the
 * state was not entered.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t kt1, kt2;
	s64 idle_time;
	s64 sleep_ticks = 0;


	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	/* No ACPI I/O port access during suspend: degrade to C1 entry. */
	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	/*
	 * Bus-master activity pending: demote to the recorded safe state
	 * (deepest non-C3), or to a plain halt when none was set up.
	 */
	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	kt1 = ktime_get_real();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = us_to_pm_timer_ticks(idle_time);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return idle_time;
}
1011
/* cpuidle driver instance registered on behalf of the ACPI processor driver. */
struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};
1016
/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 *
 * Translates the valid entries of pr->power.states[] into cpuidle_state
 * slots on pr->power.dev, selecting the appropriate enter() handler per
 * C-state type.  Returns 0 on success, -EINVAL when power management
 * was not set up or no usable state was found.
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	dev->cpu = pr->id;
	/* Clear names/descriptions left over from a previous setup pass. */
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		/*
		 * On SMP without _CST, states deeper than C1 are only
		 * usable when the FADT advertises C2-on-MP support.
		 */
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;

		state->flags = 0;
		/* NOTE(review): assumes valid states are only C1..C3 —
		 * a valid cx with another type would leave state->enter
		 * NULL while still being counted; confirm against
		 * acpi_processor_get_power_info(). */
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			/* C1 residency is only measurable via FFH entry. */
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			/* Use the BM-aware entry path only when the
			 * platform requires bus-master checking. */
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
1102
1103int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1104{
Venkatesh Pallipadidcb84f32008-05-19 19:09:27 -04001105 int ret = 0;
Len Brown4f86d3a2007-10-03 18:58:00 -04001106
Venkatesh Pallipadi36a91352008-04-30 13:57:15 -04001107 if (boot_option_idle_override)
1108 return 0;
1109
Len Brown4f86d3a2007-10-03 18:58:00 -04001110 if (!pr)
1111 return -EINVAL;
1112
1113 if (nocst) {
1114 return -ENODEV;
1115 }
1116
1117 if (!pr->flags.power_setup_done)
1118 return -ENODEV;
1119
1120 cpuidle_pause_and_lock();
1121 cpuidle_disable_device(&pr->power.dev);
1122 acpi_processor_get_power_info(pr);
Venkatesh Pallipadidcb84f32008-05-19 19:09:27 -04001123 if (pr->flags.power) {
1124 acpi_processor_setup_cpuidle(pr);
1125 ret = cpuidle_enable_device(&pr->power.dev);
1126 }
Len Brown4f86d3a2007-10-03 18:58:00 -04001127 cpuidle_resume_and_unlock();
1128
1129 return ret;
1130}
1131
/*
 * acpi_processor_power_init - per-processor power management setup.
 * @pr:     the ACPI processor
 * @device: the corresponding ACPI device
 *
 * One-time (first_run-guarded) global setup: applies the "idle=halt"
 * restriction, DMI quirks and the architecture C-state limit.  Per
 * processor: notifies the BIOS of _CST support via the FADT SMI command,
 * reads the power information, registers the cpuidle device when any
 * C-state is usable, and creates the /proc "power" file.
 * Returns 0 on success or a negative errno.
 */
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;	/* global setup runs only once */
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (boot_option_idle_override)
		return 0;

	if (!first_run) {
		if (idle_halt) {
			/*
			 * When the boot option of "idle=halt" is added, halt
			 * is used for CPU IDLE.
			 * In such case C2/C3 is meaningless. So the max_cstate
			 * is set to one.
			 */
			max_cstate = 1;
		}
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (!pr)
		return -EINVAL;

	/* Tell the BIOS (via the FADT SMI command) that we handle _CST. */
	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is
	 * supported.  On platforms that only support C1, the previously
	 * set idle handler remains in use.
	 */
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");
	}

	/* 'power' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_power_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
	return 0;
}
1204
Len Brown4be44fc2005-08-05 00:44:28 -04001205int acpi_processor_power_exit(struct acpi_processor *pr,
1206 struct acpi_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207{
Venkatesh Pallipadi36a91352008-04-30 13:57:15 -04001208 if (boot_option_idle_override)
1209 return 0;
1210
Venkatesh Pallipadidcb84f32008-05-19 19:09:27 -04001211 cpuidle_unregister_device(&pr->power.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 pr->flags.power_setup_done = 0;
1213
1214 if (acpi_device_dir(device))
Len Brown4be44fc2005-08-05 00:44:28 -04001215 remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
1216 acpi_device_dir(device));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217
Patrick Mocheld550d982006-06-27 00:41:40 -04001218 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219}