blob: 39e321af832486778578de6f84f5e53c75711a01 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* arch/arm/mach-msm/pm2.c
2 *
3 * MSM Power Management Routines
4 *
5 * Copyright (C) 2007 Google, Inc.
Murali Nalajala0df9fee2012-01-12 15:26:09 +05306 * Copyright (c) 2008-2012 Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/clk.h>
22#include <linux/delay.h>
23#include <linux/init.h>
24#include <linux/pm.h>
25#include <linux/pm_qos_params.h>
26#include <linux/proc_fs.h>
27#include <linux/suspend.h>
28#include <linux/reboot.h>
29#include <linux/uaccess.h>
30#include <linux/io.h>
Murali Nalajala8fda4492012-03-19 18:22:59 +053031#include <linux/tick.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070032#include <linux/memory.h>
33#ifdef CONFIG_HAS_WAKELOCK
34#include <linux/wakelock.h>
35#endif
36#include <mach/msm_iomap.h>
37#include <mach/system.h>
38#ifdef CONFIG_CPU_V7
39#include <asm/pgtable.h>
40#include <asm/pgalloc.h>
41#endif
42#ifdef CONFIG_CACHE_L2X0
43#include <asm/hardware/cache-l2x0.h>
44#endif
45#ifdef CONFIG_VFP
46#include <asm/vfp.h>
47#endif
48
49#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_DEEP_POWER_DOWN
50#include <mach/msm_migrate_pages.h>
51#endif
Murali Nalajala41786ab2012-03-06 10:47:32 +053052#include <mach/socinfo.h>
Anji jonnala1f2377c2012-03-27 14:35:55 +053053#include <asm/smp_scu.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070054
55#include "smd_private.h"
56#include "smd_rpcrouter.h"
57#include "acpuclock.h"
58#include "clock.h"
59#include "proc_comm.h"
60#include "idle.h"
61#include "irq.h"
62#include "gpio.h"
63#include "timer.h"
Matt Wagantall7cca4642012-02-01 16:43:24 -080064#include "pm.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065#include "spm.h"
66#include "sirc.h"
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -060067#include "pm-boot.h"
/* Physical address of the core1 reset control register; ioremap'd on demand
 * by configure_top_csr().
 */
#define MSM_CORE1_RESET 0xA8600590
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070069
70/******************************************************************************
71 * Debug Definitions
72 *****************************************************************************/
73
/*
 * Debug categories; each bit can be enabled independently through the
 * "debug_mask" module parameter and is tested by MSM_PM_DPRINTK().
 */
enum {
	MSM_PM_DEBUG_SUSPEND = BIT(0),
	MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
	MSM_PM_DEBUG_STATE = BIT(2),
	MSM_PM_DEBUG_CLOCK = BIT(3),
	MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
	MSM_PM_DEBUG_SMSM_STATE = BIT(5),
	MSM_PM_DEBUG_IDLE = BIT(6),
	MSM_PM_DEBUG_HOTPLUG = BIT(7),
};

/* Bitmask of enabled debug categories (writable at runtime via sysfs). */
static int msm_pm_debug_mask;
/*
 * Read by msm_pm_config_hw_after_power_up() to decide whether the SCU and
 * top CSR must be reprogrammed on msm8625.
 * NOTE(review): set by code outside this view — presumably when a power
 * collapse has actually occurred; confirm against the setter.
 */
int power_collapsed;
module_param_named(
	debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);
90
/* printk() gated on msm_pm_debug_mask: emits only when any bit in @mask
 * is enabled in the module's debug mask.
 */
#define MSM_PM_DPRINTK(mask, level, message, ...) \
	do { \
		if ((mask) & msm_pm_debug_mask) \
			printk(level message, ## __VA_ARGS__); \
	} while (0)

/* Dump the sleep-related hardware registers and shared-memory state words,
 * prefixed with @tag, when MSM_PM_DEBUG_STATE logging is enabled.
 */
#define MSM_PM_DEBUG_PRINT_STATE(tag) \
	do { \
		MSM_PM_DPRINTK(MSM_PM_DEBUG_STATE, \
			KERN_INFO, "%s: " \
			"APPS_CLK_SLEEP_EN %x, APPS_PWRDOWN %x, " \
			"SMSM_POWER_MASTER_DEM %x, SMSM_MODEM_STATE %x, " \
			"SMSM_APPS_DEM %x\n", \
			tag, \
			__raw_readl(APPS_CLK_SLEEP_EN), \
			__raw_readl(APPS_PWRDOWN), \
			smsm_get_state(SMSM_POWER_MASTER_DEM), \
			smsm_get_state(SMSM_MODEM_STATE), \
			smsm_get_state(SMSM_APPS_DEM)); \
	} while (0)

/* Print the shared-memory sleep parameters (time, resources, wakeup info)
 * when MSM_PM_DEBUG_SMSM_STATE logging is enabled.
 */
#define MSM_PM_DEBUG_PRINT_SLEEP_INFO() \
	do { \
		if (msm_pm_debug_mask & MSM_PM_DEBUG_SMSM_STATE) \
			smsm_print_sleep_info(msm_pm_smem_data->sleep_time, \
				msm_pm_smem_data->resources_used, \
				msm_pm_smem_data->irq_mask, \
				msm_pm_smem_data->wakeup_reason, \
				msm_pm_smem_data->pending_irqs); \
	} while (0)
121
122
123/******************************************************************************
124 * Sleep Modes and Parameters
125 *****************************************************************************/
126
/*
 * Minimum expected idle duration before sleep is attempted; defaults to the
 * Kconfig value and is tunable at runtime via the "idle_sleep_min_time"
 * module parameter.
 */
static int msm_pm_idle_sleep_min_time = CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME;
module_param_named(
	idle_sleep_min_time, msm_pm_idle_sleep_min_time,
	int, S_IRUGO | S_IWUSR | S_IWGRP
);
132
/* Indices of the per-mode sysfs attributes created for each sleep mode. */
enum {
	MSM_PM_MODE_ATTR_SUSPEND,
	MSM_PM_MODE_ATTR_IDLE,
	MSM_PM_MODE_ATTR_LATENCY,
	MSM_PM_MODE_ATTR_RESIDENCY,
	MSM_PM_MODE_ATTR_NR,
};

/* sysfs attribute file names, indexed by MSM_PM_MODE_ATTR_*. */
static char *msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_NR] = {
	[MSM_PM_MODE_ATTR_SUSPEND] = "suspend_enabled",
	[MSM_PM_MODE_ATTR_IDLE] = "idle_enabled",
	[MSM_PM_MODE_ATTR_LATENCY] = "latency",
	[MSM_PM_MODE_ATTR_RESIDENCY] = "residency",
};

/*
 * Per-sleep-mode kobject directory names, indexed by MSM_PM_SLEEP_MODE_*.
 * The POWER_COLLAPSE_SUSPEND entry is a single space, so it never matches
 * a kobject name in the attr show/store lookup loops.
 */
static char *msm_pm_sleep_mode_labels[MSM_PM_SLEEP_MODE_NR] = {
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND] = " ",
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = "power_collapse",
	[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT] =
		"ramp_down_and_wfi",
	[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = "wfi",
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] =
		"power_collapse_no_xo_shutdown",
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
		"standalone_power_collapse",
};
159
/* Board-provided table of sleep modes, one entry per (cpu, mode) pair;
 * installed by msm_pm_set_platform_data().
 */
static struct msm_pm_platform_data *msm_pm_modes;
/* IRQ helper callbacks, installed by msm_pm_set_irq_extns(). */
static struct msm_pm_irq_calls *msm_pm_irq_extns;

/* A kobj_attribute tagged with the CPU it belongs to, so the shared
 * show/store handlers can recover the CPU from the attribute pointer.
 */
struct msm_pm_kobj_attribute {
	unsigned int cpu;
	struct kobj_attribute ka;
};

/* Recover the owning CPU from an embedded kobj_attribute pointer. */
#define GET_CPU_OF_ATTR(attr) \
	(container_of(attr, struct msm_pm_kobj_attribute, ka)->cpu)

/* Per-(cpu, sleep-mode) sysfs directory: its kobject, the attribute group,
 * and storage for up to MSM_PM_MODE_ATTR_NR attributes (NULL-terminated).
 */
struct msm_pm_sysfs_sleep_mode {
	struct kobject *kobj;
	struct attribute_group attr_group;
	struct attribute *attrs[MSM_PM_MODE_ATTR_NR + 1];
	struct msm_pm_kobj_attribute kas[MSM_PM_MODE_ATTR_NR];
};
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700177
178/*
179 * Write out the attribute.
180 */
/*
 * sysfs show handler shared by all per-mode attributes.
 *
 * Identifies the sleep mode from the kobject's directory name and the CPU
 * from the attribute wrapper, then formats the requested field of the
 * matching msm_pm_modes[] entry into @buf.
 *
 * Returns the number of bytes written (including the appended newline), or
 * -EINVAL if the kobject/attribute pair matches nothing.
 */
static ssize_t msm_pm_mode_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	int ret = -EINVAL;
	int i;

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct kernel_param kp;
		unsigned int cpu;
		struct msm_pm_platform_data *mode;

		if (msm_pm_sleep_mode_labels[i] == NULL)
			continue;

		/* The kobject directory name selects the sleep mode. */
		if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
			continue;

		cpu = GET_CPU_OF_ATTR(attr);
		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];

		/*
		 * NOTE(review): param_get_ulong() reads through kp.arg as an
		 * unsigned long while arg is declared u32 — this relies on
		 * the two types having equal size (true on 32-bit ARM).
		 */
		if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
			u32 arg = mode->suspend_enabled;
			kp.arg = &arg;
			ret = param_get_ulong(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
			u32 arg = mode->idle_enabled;
			kp.arg = &arg;
			ret = param_get_ulong(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_LATENCY])) {
			u32 arg = mode->latency;
			kp.arg = &arg;
			ret = param_get_ulong(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_RESIDENCY])) {
			u32 arg = mode->residency;
			kp.arg = &arg;
			ret = param_get_ulong(buf, &kp);
		}

		break;
	}

	/* Terminate a successful read with a newline and count it. */
	if (ret > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
		ret++;
	}

	return ret;
}
233
234/*
235 * Read in the new attribute value.
236 */
/*
 * sysfs store handler shared by all per-mode attributes.
 *
 * Mirrors msm_pm_mode_attr_show(): resolves the (cpu, mode) entry, then
 * parses @buf directly into the corresponding field. The enable flags are
 * parsed with param_set_byte() and latency/residency with
 * param_set_ulong(), so the fields must have those widths.
 *
 * Returns @count on success, or the negative parse error.
 */
static ssize_t msm_pm_mode_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = -EINVAL;
	int i;

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct kernel_param kp;
		unsigned int cpu;
		struct msm_pm_platform_data *mode;

		if (msm_pm_sleep_mode_labels[i] == NULL)
			continue;

		/* The kobject directory name selects the sleep mode. */
		if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
			continue;

		cpu = GET_CPU_OF_ATTR(attr);
		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];

		if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
			kp.arg = &mode->suspend_enabled;
			ret = param_set_byte(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
			kp.arg = &mode->idle_enabled;
			ret = param_set_byte(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_LATENCY])) {
			kp.arg = &mode->latency;
			ret = param_set_ulong(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_RESIDENCY])) {
			kp.arg = &mode->residency;
			ret = param_set_ulong(buf, &kp);
		}

		break;
	}

	return ret ? ret : count;
}
280
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530281 /* Add sysfs entries for one cpu. */
282static int __init msm_pm_mode_sysfs_add_cpu(
283 unsigned int cpu, struct kobject *modes_kobj)
284{
285 char cpu_name[8];
286 struct kobject *cpu_kobj;
287 struct msm_pm_sysfs_sleep_mode *mode = NULL;
288 int i, j, k;
289 int ret;
290
291 snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
292 cpu_kobj = kobject_create_and_add(cpu_name, modes_kobj);
293 if (!cpu_kobj) {
294 pr_err("%s: cannot create %s kobject\n", __func__, cpu_name);
295 ret = -ENOMEM;
296 goto mode_sysfs_add_cpu_exit;
297 }
298
299 for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
300 int idx = MSM_PM_MODE(cpu, i);
301
302 if ((!msm_pm_modes[idx].suspend_supported) &&
303 (!msm_pm_modes[idx].idle_supported))
304 continue;
305
306 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
307 if (!mode) {
308 pr_err("%s: cannot allocate memory for attributes\n",
309 __func__);
310 ret = -ENOMEM;
311 goto mode_sysfs_add_cpu_exit;
312 }
313
314 mode->kobj = kobject_create_and_add(
315 msm_pm_sleep_mode_labels[i], cpu_kobj);
316 if (!mode->kobj) {
317 pr_err("%s: cannot create kobject\n", __func__);
318 ret = -ENOMEM;
319 goto mode_sysfs_add_cpu_exit;
320 }
321
322 for (k = 0, j = 0; k < MSM_PM_MODE_ATTR_NR; k++) {
323 if ((k == MSM_PM_MODE_ATTR_IDLE) &&
324 !msm_pm_modes[idx].idle_supported)
325 continue;
326 if ((k == MSM_PM_MODE_ATTR_SUSPEND) &&
327 !msm_pm_modes[idx].suspend_supported)
328 continue;
329 mode->kas[j].cpu = cpu;
330 mode->kas[j].ka.attr.mode = 0644;
331 mode->kas[j].ka.show = msm_pm_mode_attr_show;
332 mode->kas[j].ka.store = msm_pm_mode_attr_store;
333 mode->kas[j].ka.attr.name = msm_pm_mode_attr_labels[k];
334 mode->attrs[j] = &mode->kas[j].ka.attr;
335 j++;
336 }
337 mode->attrs[j] = NULL;
338
339 mode->attr_group.attrs = mode->attrs;
340 ret = sysfs_create_group(mode->kobj, &mode->attr_group);
341 if (ret) {
342 printk(KERN_ERR
343 "%s: cannot create kobject attribute group\n",
344 __func__);
345 goto mode_sysfs_add_cpu_exit;
346 }
347 }
348
349 ret = 0;
350
351mode_sysfs_add_cpu_exit:
352 if (ret) {
353 if (mode && mode->kobj)
354 kobject_del(mode->kobj);
355 kfree(mode);
356 }
357
358 return ret;
359}
360
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700361/*
362 * Add sysfs entries for the sleep modes.
363 */
364static int __init msm_pm_mode_sysfs_add(void)
365{
366 struct kobject *module_kobj = NULL;
367 struct kobject *modes_kobj = NULL;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530368 unsigned int cpu;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700369 int ret;
370
371 module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
372 if (!module_kobj) {
373 printk(KERN_ERR "%s: cannot find kobject for module %s\n",
374 __func__, KBUILD_MODNAME);
375 ret = -ENOENT;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530376 goto mode_sysfs_add_exit;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700377 }
378
379 modes_kobj = kobject_create_and_add("modes", module_kobj);
380 if (!modes_kobj) {
381 printk(KERN_ERR "%s: cannot create modes kobject\n", __func__);
382 ret = -ENOMEM;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530383 goto mode_sysfs_add_exit;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700384 }
385
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530386 for_each_possible_cpu(cpu) {
387 ret = msm_pm_mode_sysfs_add_cpu(cpu, modes_kobj);
388 if (ret)
389 goto mode_sysfs_add_exit;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700390 }
391
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530392 ret = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700393
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530394mode_sysfs_add_exit:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700395 return ret;
396}
397
/*
 * Install the board-specific sleep-mode table.
 *
 * data:  array of mode descriptors, indexed via MSM_PM_MODE(cpu, mode)
 * count: number of entries in @data
 *
 * BUGs if the table is too small to hold every (cpu, mode) combination.
 */
void __init msm_pm_set_platform_data(
	struct msm_pm_platform_data *data, int count)
{
	BUG_ON(MSM_PM_SLEEP_MODE_NR * num_possible_cpus() > count);
	msm_pm_modes = data;
}
404
/*
 * Install the IRQ helper callbacks used during sleep entry/exit.
 *
 * BUGs if @irq_calls or any required callback is NULL, since the sleep
 * paths invoke them unconditionally.
 */
void __init msm_pm_set_irq_extns(struct msm_pm_irq_calls *irq_calls)
{
	/* sanity check */
	BUG_ON(irq_calls == NULL || irq_calls->irq_pending == NULL ||
		irq_calls->idle_sleep_allowed == NULL ||
		irq_calls->enter_sleep1 == NULL ||
		irq_calls->enter_sleep2 == NULL ||
		irq_calls->exit_sleep1 == NULL ||
		irq_calls->exit_sleep2 == NULL ||
		irq_calls->exit_sleep3 == NULL);

	msm_pm_irq_extns = irq_calls;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700418
419/******************************************************************************
420 * Sleep Limitations
421 *****************************************************************************/
/* Sleep limitation values; SLEEP_LIMIT_MASK extracts the limit bits. */
enum {
	SLEEP_LIMIT_NONE = 0,
	SLEEP_LIMIT_NO_TCXO_SHUTDOWN = 2,
	SLEEP_LIMIT_MASK = 0x03,
};

#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE
/* Bits advertising memory low-power capability in the sleep resources. */
enum {
	SLEEP_RESOURCE_MEMORY_BIT0 = 0x0200,
	SLEEP_RESOURCE_MEMORY_BIT1 = 0x0010,
};
#endif
434
435
436/******************************************************************************
437 * Configure Hardware for Power Down/Up
438 *****************************************************************************/
439
/*
 * Apps power-control register addresses. MSM7x30 places them in the APCS
 * GCC / ACC0 / TCSR blocks and has no standby-control register; other
 * targets use the CSR block and have no secure-operation register. The
 * unavailable register is defined as NULL on each variant.
 */
#if defined(CONFIG_ARCH_MSM7X30)
#define APPS_CLK_SLEEP_EN (MSM_APCS_GCC_BASE + 0x020)
#define APPS_PWRDOWN (MSM_ACC0_BASE + 0x01c)
#define APPS_SECOP (MSM_TCSR_BASE + 0x038)
#define APPS_STANDBY_CTL NULL
#else
#define APPS_CLK_SLEEP_EN (MSM_CSR_BASE + 0x11c)
#define APPS_PWRDOWN (MSM_CSR_BASE + 0x440)
#define APPS_STANDBY_CTL (MSM_CSR_BASE + 0x108)
#define APPS_SECOP NULL
#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700451
452/*
453 * Configure hardware registers in preparation for Apps power down.
454 */
/*
 * Configure hardware registers in preparation for Apps power down.
 *
 * Per-SoC sequence of sleep-enable/power-down register writes; each write
 * is followed by mb() so it reaches the hardware before the next step.
 * The fallthrough branch handles the remaining (e.g. qsd8x50-class)
 * targets and additionally clears the standby control register.
 */
static void msm_pm_config_hw_before_power_down(void)
{
	if (cpu_is_msm7x30() || cpu_is_msm8x55()) {
		__raw_writel(1, APPS_PWRDOWN);
		mb();
		__raw_writel(4, APPS_SECOP);
		mb();
	} else if (cpu_is_msm7x27()) {
		__raw_writel(0x1f, APPS_CLK_SLEEP_EN);
		mb();
		__raw_writel(1, APPS_PWRDOWN);
		mb();
	} else if (cpu_is_msm7x27a() || cpu_is_msm7x27aa() ||
		cpu_is_msm7x25a() || cpu_is_msm7x25aa()) {
		__raw_writel(0x7, APPS_CLK_SLEEP_EN);
		mb();
		__raw_writel(1, APPS_PWRDOWN);
		mb();
	} else {
		__raw_writel(0x1f, APPS_CLK_SLEEP_EN);
		mb();
		__raw_writel(1, APPS_PWRDOWN);
		mb();
		__raw_writel(0, APPS_STANDBY_CTL);
		mb();
	}
}
482
483/*
Anji jonnala1f2377c2012-03-27 14:35:55 +0530484 * Program the top csr from core0 context to put the
485 * core1 into GDFS, as core1 is not running yet.
486 */
/*
 * Program the top csr from core0 context to put the
 * core1 into GDFS, as core1 is not running yet.
 *
 * Maps the core1 reset register, briefly releases the core from reset,
 * then walks the MPA5_CFG_CTL_REG bit sequence (TCSR enable, SPM1 reset,
 * CLK_OFF, clamps, power_up) before disabling core0's TCSR access and
 * putting core1 back into reset. Each write is followed by mb().
 * Silently returns if the ioremap fails.
 */
static void configure_top_csr(void)
{
	void __iomem *base_ptr;
	unsigned int value = 0;

	base_ptr = ioremap_nocache(MSM_CORE1_RESET, SZ_4);
	if (!base_ptr)
		return;

	/* bring the core1 out of reset */
	__raw_writel(0x3, base_ptr);
	mb();
	/*
	 * override DBGNOPOWERDN and program the GDFS
	 * count val
	 */

	__raw_writel(0x00030002, (MSM_CFG_CTL_BASE + 0x38));
	mb();

	/* Initialize the SPM0 and SPM1 registers */
	msm_spm_reinit();

	/* enable TCSR for core1 */
	value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
	value |= BIT(22);
	__raw_writel(value, MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
	mb();

	/* set reset bit for SPM1 */
	value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
	value |= BIT(20);
	__raw_writel(value, MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
	mb();

	/* set CLK_OFF bit */
	value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
	value |= BIT(18);
	__raw_writel(value, MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
	mb();

	/* set clamps bit */
	value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
	value |= BIT(21);
	__raw_writel(value, MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
	mb();

	/* set power_up bit */
	value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
	value |= BIT(19);
	__raw_writel(value, MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
	mb();

	/* Disable TCSR for core0 */
	value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
	value &= ~BIT(22);
	__raw_writel(value, MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
	mb();

	/* put core1 back into reset and release the mapping */
	__raw_writel(0x0, base_ptr);
	mb();
	iounmap(base_ptr);
}
549
550/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700551 * Clear hardware registers after Apps powers up.
552 */
/*
 * Clear hardware registers after Apps powers up.
 *
 * Undoes msm_pm_config_hw_before_power_down(): clears the power-down (and,
 * on 7x30/8x55, secure-op) registers and reinitializes the SPM. On msm8625,
 * if a power collapse actually happened (power_collapsed set), also
 * re-enables the SCU and reprograms the top CSR for core1.
 */
static void msm_pm_config_hw_after_power_up(void)
{
	if (cpu_is_msm7x30() || cpu_is_msm8x55()) {
		__raw_writel(0, APPS_SECOP);
		mb();
		__raw_writel(0, APPS_PWRDOWN);
		mb();
		msm_spm_reinit();
	} else {
		__raw_writel(0, APPS_PWRDOWN);
		mb();
		__raw_writel(0, APPS_CLK_SLEEP_EN);
		mb();

		if (cpu_is_msm8625() && power_collapsed) {
			/*
			 * enable the SCU while coming out of power
			 * collapse.
			 */
			scu_enable(MSM_SCU_BASE);
			/*
			 * Program the top csr to put the core1 into GDFS.
			 */
			configure_top_csr();
		}
	}
}
581
582/*
583 * Configure hardware registers in preparation for SWFI.
584 */
/*
 * Configure hardware registers in preparation for SWFI.
 *
 * Writes the per-SoC sleep-enable mask before executing wait-for-interrupt;
 * targets not listed here need no programming.
 */
static void msm_pm_config_hw_before_swfi(void)
{
	if (cpu_is_qsd8x50()) {
		__raw_writel(0x1f, APPS_CLK_SLEEP_EN);
		mb();
	} else if (cpu_is_msm7x27()) {
		__raw_writel(0x0f, APPS_CLK_SLEEP_EN);
		mb();
	} else if (cpu_is_msm7x27a() || cpu_is_msm7x27aa() ||
		cpu_is_msm7x25a() || cpu_is_msm7x25aa()) {
		__raw_writel(0x7, APPS_CLK_SLEEP_EN);
		mb();
	}
}
599
600/*
601 * Respond to timing out waiting for Modem
602 *
603 * NOTE: The function never returns.
604 */
/*
 * Respond to timing out waiting for Modem
 *
 * Depending on the configured policy, resets the chip, resets the modem,
 * or simply halts — then spins forever.
 *
 * NOTE: The function never returns.
 */
static void msm_pm_timeout(void)
{
#if defined(CONFIG_MSM_PM_TIMEOUT_RESET_CHIP)
	printk(KERN_EMERG "%s(): resetting chip\n", __func__);
	msm_proc_comm(PCOM_RESET_CHIP_IMM, NULL, NULL);
#elif defined(CONFIG_MSM_PM_TIMEOUT_RESET_MODEM)
	printk(KERN_EMERG "%s(): resetting modem\n", __func__);
	msm_proc_comm_reset_modem_now();
#elif defined(CONFIG_MSM_PM_TIMEOUT_HALT)
	printk(KERN_EMERG "%s(): halting\n", __func__);
#endif
	for (;;)
		;
}
619
620
621/******************************************************************************
622 * State Polling Definitions
623 *****************************************************************************/
624
/*
 * One polling condition for msm_pm_poll_state(): a shared-memory state
 * group plus the bit conditions to wait for. value_read is filled with the
 * last value sampled from the group.
 */
struct msm_pm_polled_group {
	uint32_t group_id;

	uint32_t bits_all_set;
	uint32_t bits_all_clear;
	uint32_t bits_any_set;
	uint32_t bits_any_clear;

	uint32_t value_read;
};
635
636/*
637 * Return true if all bits indicated by flag are set in source.
638 */
/*
 * Return true if all bits indicated by flag are set in source.
 */
static inline bool msm_pm_all_set(uint32_t source, uint32_t flag)
{
	uint32_t present = source & flag;

	return present == flag;
}
643
644/*
645 * Return true if any bit indicated by flag are set in source.
646 */
/*
 * Return true if any bit indicated by flag are set in source.
 * An empty flag is trivially satisfied.
 */
static inline bool msm_pm_any_set(uint32_t source, uint32_t flag)
{
	if (flag == 0)
		return true;

	return (source & flag) != 0;
}
651
652/*
653 * Return true if all bits indicated by flag are cleared in source.
654 */
/*
 * Return true if all bits indicated by flag are cleared in source.
 */
static inline bool msm_pm_all_clear(uint32_t source, uint32_t flag)
{
	uint32_t inverted = ~source;

	return (inverted & flag) == flag;
}
659
660/*
661 * Return true if any bit indicated by flag are cleared in source.
662 */
/*
 * Return true if any bit indicated by flag are cleared in source.
 * An empty flag is trivially satisfied.
 */
static inline bool msm_pm_any_clear(uint32_t source, uint32_t flag)
{
	if (flag == 0)
		return true;

	return (source & flag) != flag;
}
667
668/*
669 * Poll the shared memory states as indicated by the poll groups.
670 *
671 * nr_grps: number of groups in the array
672 * grps: array of groups
673 *
674 * The function returns when conditions specified by any of the poll
675 * groups become true. The conditions specified by a poll group are
676 * deemed true when 1) at least one bit from bits_any_set is set OR one
677 * bit from bits_any_clear is cleared; and 2) all bits in bits_all_set
678 * are set; and 3) all bits in bits_all_clear are cleared.
679 *
680 * Return value:
681 * >=0: index of the poll group whose conditions have become true
682 * -ETIMEDOUT: timed out
683 */
/*
 * Poll the shared memory states as indicated by the poll groups.
 *
 * nr_grps: number of groups in the array
 * grps: array of groups
 *
 * The function returns when conditions specified by any of the poll
 * groups become true. The conditions specified by a poll group are
 * deemed true when 1) at least one bit from bits_any_set is set OR one
 * bit from bits_any_clear is cleared; and 2) all bits in bits_all_set
 * are set; and 3) all bits in bits_all_clear are cleared.
 *
 * Return value:
 * >=0: index of the poll group whose conditions have become true
 * -ETIMEDOUT: timed out
 */
static int msm_pm_poll_state(int nr_grps, struct msm_pm_polled_group *grps)
{
	int i, k;

	/* 50000 iterations x 50us delay: roughly 2.5s worst-case budget. */
	for (i = 0; i < 50000; i++) {
		for (k = 0; k < nr_grps; k++) {
			bool all_set, all_clear;
			bool any_set, any_clear;

			grps[k].value_read = smsm_get_state(grps[k].group_id);

			all_set = msm_pm_all_set(grps[k].value_read,
					grps[k].bits_all_set);
			all_clear = msm_pm_all_clear(grps[k].value_read,
					grps[k].bits_all_clear);
			any_set = msm_pm_any_set(grps[k].value_read,
					grps[k].bits_any_set);
			any_clear = msm_pm_any_clear(grps[k].value_read,
					grps[k].bits_any_clear);

			if (all_set && all_clear && (any_set || any_clear))
				return k;
		}
		udelay(50);
	}

	/* Timed out: dump each group's conditions and last-read value. */
	printk(KERN_ERR "%s failed:\n", __func__);
	for (k = 0; k < nr_grps; k++)
		printk(KERN_ERR "(%x, %x, %x, %x) %x\n",
			grps[k].bits_all_set, grps[k].bits_all_clear,
			grps[k].bits_any_set, grps[k].bits_any_clear,
			grps[k].value_read);

	return -ETIMEDOUT;
}
719
720
721/******************************************************************************
722 * Suspend Max Sleep Time
723 *****************************************************************************/
724
/* Slow (sleep) clock frequency in Hz. */
#define SCLK_HZ (32768)
/* Upper bound on a requested sleep duration, in slow-clock ticks. */
#define MSM_PM_SLEEP_TICK_LIMIT (0x6DDD000)

#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
/* When nonzero, overrides the computed suspend sleep time (module param). */
static int msm_pm_sleep_time_override;
module_param_named(sleep_time_override,
	msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
#endif

/* Suspend sleep time in slow-clock ticks; 0 means sleep indefinitely. */
static uint32_t msm_pm_max_sleep_time;
735
736/*
737 * Convert time from nanoseconds to slow clock ticks, then cap it to the
738 * specified limit
739 */
/*
 * Convert time from nanoseconds to slow clock ticks, then cap it to the
 * specified limit
 *
 * time_ns: duration in nanoseconds (divided in place by do_div)
 * limit:   maximum value to return, in slow-clock ticks
 */
static int64_t msm_pm_convert_and_cap_time(int64_t time_ns, int64_t limit)
{
	do_div(time_ns, NSEC_PER_SEC / SCLK_HZ);
	return (time_ns > limit) ? limit : time_ns;
}
745
746/*
747 * Set the sleep time for suspend. 0 means infinite sleep time.
748 */
/*
 * Set the sleep time for suspend. 0 means infinite sleep time.
 *
 * Converts @max_sleep_time_ns to slow-clock ticks (capped at
 * MSM_PM_SLEEP_TICK_LIMIT) and stores it in msm_pm_max_sleep_time with
 * interrupts disabled. A nonzero request that rounds down to 0 ticks is
 * bumped to 1 so it is not mistaken for "infinite".
 */
void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns)
{
	unsigned long flags;

	local_irq_save(flags);
	if (max_sleep_time_ns == 0) {
		msm_pm_max_sleep_time = 0;
	} else {
		msm_pm_max_sleep_time = (uint32_t)msm_pm_convert_and_cap_time(
			max_sleep_time_ns, MSM_PM_SLEEP_TICK_LIMIT);

		if (msm_pm_max_sleep_time == 0)
			msm_pm_max_sleep_time = 1;
	}

	MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND, KERN_INFO,
		"%s(): Requested %lld ns Giving %u sclk ticks\n", __func__,
		max_sleep_time_ns, msm_pm_max_sleep_time);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(msm_pm_set_max_sleep_time);
770
771
772/******************************************************************************
773 * CONFIG_MSM_IDLE_STATS
774 *****************************************************************************/
775
#ifdef CONFIG_MSM_IDLE_STATS
/* Categories of time tracked by the idle/suspend statistics. */
enum msm_pm_time_stats_id {
	MSM_PM_STAT_REQUESTED_IDLE,
	MSM_PM_STAT_IDLE_SPIN,
	MSM_PM_STAT_IDLE_WFI,
	MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
	MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
	MSM_PM_STAT_IDLE_POWER_COLLAPSE,
	MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
	MSM_PM_STAT_SUSPEND,
	MSM_PM_STAT_FAILED_SUSPEND,
	MSM_PM_STAT_NOT_IDLE,
	MSM_PM_STAT_COUNT
};

/*
 * Histogram for one statistic: exponentially sized buckets (each boundary
 * is the previous one shifted by CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT),
 * per-bucket min/max samples, and overall count/total.
 */
struct msm_pm_time_stats {
	const char *name;
	int64_t first_bucket_time;
	int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int count;
	int64_t total_time;
};

/* Per-CPU set of all statistics. */
struct msm_pm_cpu_time_stats {
	struct msm_pm_time_stats stats[MSM_PM_STAT_COUNT];
};

static DEFINE_PER_CPU_SHARED_ALIGNED(
	struct msm_pm_cpu_time_stats, msm_pm_stats);

/* Sleep limit voted during the most recent power collapse. */
static uint32_t msm_pm_sleep_limit = SLEEP_LIMIT_NONE;

/* Protects all per-CPU statistics and msm_pm_sleep_limit. */
static DEFINE_SPINLOCK(msm_pm_stats_lock);
807
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700808static uint32_t msm_pm_sleep_limit = SLEEP_LIMIT_NONE;
809
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530810static DEFINE_SPINLOCK(msm_pm_stats_lock);
811
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700812/*
813 * Add the given time data to the statistics collection.
814 */
/*
 * Add the given time data to the statistics collection.
 *
 * id: which statistic to update
 * t:  sample duration in nanoseconds
 *
 * Buckets the sample exponentially: bucket index grows with
 * log2(t / first_bucket_time) / BUCKET_SHIFT, clamped to the last bucket.
 * Updates the current CPU's stats under msm_pm_stats_lock.
 */
static void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t)
{
	unsigned long flags;
	struct msm_pm_time_stats *stats;
	int i;
	int64_t bt;

	spin_lock_irqsave(&msm_pm_stats_lock, flags);
	stats = __get_cpu_var(msm_pm_stats).stats;

	stats[id].total_time += t;
	stats[id].count++;

	bt = t;
	do_div(bt, stats[id].first_bucket_time);

	if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
				(CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
		i = DIV_ROUND_UP(fls((uint32_t)bt),
					CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
	else
		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;

	if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;

	stats[id].bucket[i]++;

	/*
	 * min_time starts at 0, so a zero max_time marks the bucket as
	 * never having received a sample and forces min_time to be seeded.
	 */
	if (t < stats[id].min_time[i] || !stats[id].max_time[i])
		stats[id].min_time[i] = t;
	if (t > stats[id].max_time[i])
		stats[id].max_time[i] = t;

	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
}
850
851/*
852 * Helper function of snprintf where buf is auto-incremented, size is auto-
853 * decremented, and there is no return value.
854 *
855 * NOTE: buf and size must be l-values (e.g. variables)
856 */
/*
 * Helper function of snprintf where buf is auto-incremented, size is auto-
 * decremented, and there is no return value. On truncation (snprintf
 * reporting more than @size), size is zeroed so later calls are no-ops.
 *
 * NOTE: buf and size must be l-values (e.g. variables)
 */
#define SNPRINTF(buf, size, format, ...) \
	do { \
		if (size > 0) { \
			int ret; \
			ret = snprintf(buf, size, format, ## __VA_ARGS__); \
			if (ret > size) { \
				buf += size; \
				size = 0; \
			} else { \
				buf += ret; \
				size -= ret; \
			} \
		} \
	} while (0)
871
872/*
873 * Write out the power management statistics.
874 */
/*
 * Write out the power management statistics.
 *
 * Legacy procfs read callback. @off enumerates (cpu, statistic) pairs:
 * cpu = off / MSM_PM_STAT_COUNT, id = off % MSM_PM_STAT_COUNT. The first
 * call (off == 0) is prefixed with the last power-collapse TCXO vote.
 * Requires at least 1024 bytes per call; sets *start/*eof per the
 * read_proc protocol.
 */
static int msm_pm_read_proc
	(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	unsigned int cpu = off / MSM_PM_STAT_COUNT;
	int id = off % MSM_PM_STAT_COUNT;
	char *p = page;

	if (count < 1024) {
		*start = (char *) 0;
		*eof = 0;
		return 0;
	}

	if (!off) {
		SNPRINTF(p, count, "Last power collapse voted ");
		if ((msm_pm_sleep_limit & SLEEP_LIMIT_MASK) ==
			SLEEP_LIMIT_NONE)
			SNPRINTF(p, count, "for TCXO shutdown\n\n");
		else
			SNPRINTF(p, count, "against TCXO shutdown\n\n");
	}

	if (cpu < num_possible_cpus()) {
		unsigned long flags;
		struct msm_pm_time_stats *stats;
		int i;
		int64_t bucket_time;
		int64_t s;
		uint32_t ns;

		spin_lock_irqsave(&msm_pm_stats_lock, flags);
		stats = per_cpu(msm_pm_stats, cpu).stats;

		s = stats[id].total_time;
		ns = do_div(s, NSEC_PER_SEC);
		SNPRINTF(p, count,
			"[cpu %u] %s:\n"
			"  count: %7d\n"
			"  total_time: %lld.%09u\n",
			cpu, stats[id].name,
			stats[id].count,
			s, ns);

		bucket_time = stats[id].first_bucket_time;
		for (i = 0; i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; i++) {
			s = bucket_time;
			ns = do_div(s, NSEC_PER_SEC);
			SNPRINTF(p, count,
				"   <%6lld.%09u: %7d (%lld-%lld)\n",
				s, ns, stats[id].bucket[i],
				stats[id].min_time[i],
				stats[id].max_time[i]);

			bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
		}

		/*
		 * s/ns still hold the last printed boundary, which is
		 * exactly the lower bound of this final ">=" bucket.
		 */
		SNPRINTF(p, count, "  >=%6lld.%09u: %7d (%lld-%lld)\n",
			s, ns, stats[id].bucket[i],
			stats[id].min_time[i],
			stats[id].max_time[i]);

		*start = (char *) 1;
		*eof = (off + 1 >= MSM_PM_STAT_COUNT * num_possible_cpus());

		spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
	}

	return p - page;
}
944#undef SNPRINTF
945
946#define MSM_PM_STATS_RESET "reset"
947
948/*
949 * Reset the power management statistics values.
950 */
951static int msm_pm_write_proc(struct file *file, const char __user *buffer,
952 unsigned long count, void *data)
953{
954 char buf[sizeof(MSM_PM_STATS_RESET)];
955 int ret;
956 unsigned long flags;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530957 unsigned int cpu;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700958
959 if (count < strlen(MSM_PM_STATS_RESET)) {
960 ret = -EINVAL;
961 goto write_proc_failed;
962 }
963
964 if (copy_from_user(buf, buffer, strlen(MSM_PM_STATS_RESET))) {
965 ret = -EFAULT;
966 goto write_proc_failed;
967 }
968
969 if (memcmp(buf, MSM_PM_STATS_RESET, strlen(MSM_PM_STATS_RESET))) {
970 ret = -EINVAL;
971 goto write_proc_failed;
972 }
973
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530974 spin_lock_irqsave(&msm_pm_stats_lock, flags);
975 for_each_possible_cpu(cpu) {
976 struct msm_pm_time_stats *stats;
977 int i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700978
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530979 stats = per_cpu(msm_pm_stats, cpu).stats;
980 for (i = 0; i < MSM_PM_STAT_COUNT; i++) {
981 memset(stats[i].bucket,
982 0, sizeof(stats[i].bucket));
983 memset(stats[i].min_time,
984 0, sizeof(stats[i].min_time));
985 memset(stats[i].max_time,
986 0, sizeof(stats[i].max_time));
987 stats[i].count = 0;
988 stats[i].total_time = 0;
989 }
990 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700991 msm_pm_sleep_limit = SLEEP_LIMIT_NONE;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530992 spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700993
994 return count;
995
996write_proc_failed:
997 return ret;
998}
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530999
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001000#undef MSM_PM_STATS_RESET
1001#endif /* CONFIG_MSM_IDLE_STATS */
1002
1003
1004/******************************************************************************
1005 * Shared Memory Bits
1006 *****************************************************************************/
1007
/* Width of each master's field in the shared DEM master state word. */
#define DEM_MASTER_BITS_PER_CPU 6

/*
 * Power Master State Bits - Per CPU
 *
 * Each master owns a DEM_MASTER_BITS_PER_CPU-wide field in the DEM
 * master state word; the shift below selects the apps processor's
 * field (index SMSM_APPS_STATE).
 */
#define DEM_MASTER_SMSM_RUN \
	(0x01UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
#define DEM_MASTER_SMSM_RSA \
	(0x02UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
#define DEM_MASTER_SMSM_PWRC_EARLY_EXIT \
	(0x04UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
#define DEM_MASTER_SMSM_SLEEP_EXIT \
	(0x08UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
#define DEM_MASTER_SMSM_READY \
	(0x10UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
#define DEM_MASTER_SMSM_SLEEP \
	(0x20UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))

/* Power Slave State Bits (apps side of the sleep handshake). */
#define DEM_SLAVE_SMSM_RUN	(0x0001)
#define DEM_SLAVE_SMSM_PWRC	(0x0002)
#define DEM_SLAVE_SMSM_PWRC_DELAY	(0x0004)
#define DEM_SLAVE_SMSM_PWRC_EARLY_EXIT	(0x0008)
#define DEM_SLAVE_SMSM_WFPI	(0x0010)
#define DEM_SLAVE_SMSM_SLEEP	(0x0020)
#define DEM_SLAVE_SMSM_SLEEP_EXIT	(0x0040)
#define DEM_SLAVE_SMSM_MSGS_REDUCED	(0x0080)
#define DEM_SLAVE_SMSM_RESET	(0x0100)
#define DEM_SLAVE_SMSM_PWRC_SUSPEND	(0x0200)
1035
1036
1037/******************************************************************************
1038 * Shared Memory Data
1039 *****************************************************************************/
1040
#define DEM_MAX_PORT_NAME_LEN (20)

/*
 * Sleep parameter block shared with the modem.
 *
 * The apps side fills in sleep_time, irq_mask and resources_used before
 * power collapse; wakeup_reason and pending_irqs are consumed on resume
 * (passed to the exit_sleep* callbacks).
 */
struct msm_pm_smem_t {
	uint32_t sleep_time;	/* requested sleep duration (sleep_delay) */
	uint32_t irq_mask;	/* filled by msm_pm_irq_extns->enter_sleep1() */
	uint32_t resources_used;	/* sleep_limit bits handed to the modem */
	uint32_t reserved1;

	uint32_t wakeup_reason;	/* reported back; fed to exit_sleep*() */
	uint32_t pending_irqs;	/* reported back; fed to exit_sleep*() */
	uint32_t rpc_prog;	/* NOTE(review): not referenced in this file; presumably RPC wakeup info */
	uint32_t rpc_proc;	/* NOTE(review): see rpc_prog */
	char smd_port_name[DEM_MAX_PORT_NAME_LEN];	/* NOTE(review): not referenced here */
	uint32_t reserved2;
};
1056
1057
1058/******************************************************************************
1059 *
1060 *****************************************************************************/
/* Pointer to the shared-memory sleep parameter block exchanged with the modem. */
static struct msm_pm_smem_t *msm_pm_smem_data;
/* Non-zero once PM init completed; arch_idle() returns immediately until then. */
static atomic_t msm_pm_init_done = ATOMIC_INIT(0);
1063
1064static int msm_pm_modem_busy(void)
1065{
1066 if (!(smsm_get_state(SMSM_POWER_MASTER_DEM) & DEM_MASTER_SMSM_READY)) {
1067 MSM_PM_DPRINTK(MSM_PM_DEBUG_POWER_COLLAPSE,
1068 KERN_INFO, "%s(): master not ready\n", __func__);
1069 return -EBUSY;
1070 }
1071
1072 return 0;
1073}
1074
1075/*
1076 * Power collapse the Apps processor. This function executes the handshake
1077 * protocol with Modem.
1078 *
1079 * Return value:
1080 * -EAGAIN: modem reset occurred or early exit from power collapse
1081 * -EBUSY: modem not ready for our power collapse -- no power loss
1082 * -ETIMEDOUT: timed out waiting for modem's handshake -- no power loss
1083 * 0: success
1084 */
1085static int msm_pm_power_collapse
1086 (bool from_idle, uint32_t sleep_delay, uint32_t sleep_limit)
1087{
1088 struct msm_pm_polled_group state_grps[2];
1089 unsigned long saved_acpuclk_rate;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001090 int collapsed = 0;
1091 int ret;
Murali Nalajala07b04022012-04-10 16:00:49 +05301092 int val;
1093 int modem_early_exit = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001094
1095 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
1096 KERN_INFO, "%s(): idle %d, delay %u, limit %u\n", __func__,
1097 (int)from_idle, sleep_delay, sleep_limit);
1098
1099 if (!(smsm_get_state(SMSM_POWER_MASTER_DEM) & DEM_MASTER_SMSM_READY)) {
1100 MSM_PM_DPRINTK(
1101 MSM_PM_DEBUG_SUSPEND | MSM_PM_DEBUG_POWER_COLLAPSE,
1102 KERN_INFO, "%s(): master not ready\n", __func__);
1103 ret = -EBUSY;
1104 goto power_collapse_bail;
1105 }
1106
1107 memset(msm_pm_smem_data, 0, sizeof(*msm_pm_smem_data));
1108
Murali Nalajala41786ab2012-03-06 10:47:32 +05301109 if (cpu_is_msm8625()) {
1110 /* Program the SPM */
1111 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_POWER_COLLAPSE,
1112 false);
1113 WARN_ON(ret);
1114 }
1115
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301116 msm_pm_irq_extns->enter_sleep1(true, from_idle,
1117 &msm_pm_smem_data->irq_mask);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001118 msm_sirc_enter_sleep();
1119 msm_gpio_enter_sleep(from_idle);
1120
1121 msm_pm_smem_data->sleep_time = sleep_delay;
1122 msm_pm_smem_data->resources_used = sleep_limit;
1123
1124 /* Enter PWRC/PWRC_SUSPEND */
1125
1126 if (from_idle)
1127 smsm_change_state(SMSM_APPS_DEM, DEM_SLAVE_SMSM_RUN,
1128 DEM_SLAVE_SMSM_PWRC);
1129 else
1130 smsm_change_state(SMSM_APPS_DEM, DEM_SLAVE_SMSM_RUN,
1131 DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND);
1132
1133 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): PWRC");
1134 MSM_PM_DEBUG_PRINT_SLEEP_INFO();
1135
1136 memset(state_grps, 0, sizeof(state_grps));
1137 state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
1138 state_grps[0].bits_all_set = DEM_MASTER_SMSM_RSA;
1139 state_grps[1].group_id = SMSM_MODEM_STATE;
1140 state_grps[1].bits_all_set = SMSM_RESET;
1141
1142 ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
1143
1144 if (ret < 0) {
1145 printk(KERN_EMERG "%s(): power collapse entry "
1146 "timed out waiting for Modem's response\n", __func__);
1147 msm_pm_timeout();
1148 }
1149
1150 if (ret == 1) {
1151 MSM_PM_DPRINTK(
1152 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
1153 KERN_INFO,
1154 "%s(): msm_pm_poll_state detected Modem reset\n",
1155 __func__);
1156 goto power_collapse_early_exit;
1157 }
1158
1159 /* DEM Master in RSA */
1160
1161 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): PWRC RSA");
1162
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301163 ret = msm_pm_irq_extns->enter_sleep2(true, from_idle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001164 if (ret < 0) {
1165 MSM_PM_DPRINTK(
1166 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
1167 KERN_INFO,
1168 "%s(): msm_irq_enter_sleep2 aborted, %d\n", __func__,
1169 ret);
1170 goto power_collapse_early_exit;
1171 }
1172
1173 msm_pm_config_hw_before_power_down();
1174 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): pre power down");
1175
1176 saved_acpuclk_rate = acpuclk_power_collapse();
1177 MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
1178 "%s(): change clock rate (old rate = %lu)\n", __func__,
1179 saved_acpuclk_rate);
1180
1181 if (saved_acpuclk_rate == 0) {
1182 msm_pm_config_hw_after_power_up();
1183 goto power_collapse_early_exit;
1184 }
1185
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -06001186 msm_pm_boot_config_before_pc(smp_processor_id(),
1187 virt_to_phys(msm_pm_collapse_exit));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001188
1189#ifdef CONFIG_VFP
1190 if (from_idle)
1191 vfp_flush_context();
1192#endif
1193
1194#ifdef CONFIG_CACHE_L2X0
Sridhar Parasurama0222902012-04-27 11:18:02 -07001195 l2x0_suspend();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001196#endif
1197
1198 collapsed = msm_pm_collapse();
Murali Nalajala07b04022012-04-10 16:00:49 +05301199
1200 /*
1201 * TBD: Currently recognise the MODEM early exit
1202 * path by reading the MPA5_GDFS_CNT_VAL register.
1203 */
1204 if (cpu_is_msm8625()) {
1205 /*
1206 * on system reset default value of MPA5_GDFS_CNT_VAL
1207 * is = 0xFF, later power driver reprogrammed this
1208 * as: 0x000300FF. Currently based on the value of
1209 * MPA5_GDFS_CNT_VAL register decide whether it is
1210 * a modem early exit are not.
1211 */
1212 val = __raw_readl(MSM_CFG_CTL_BASE + 0x38);
1213 if (val != 0xFF)
1214 modem_early_exit = 1;
1215 else
1216 power_collapsed = 1;
1217 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001218
1219#ifdef CONFIG_CACHE_L2X0
Sridhar Parasurama0222902012-04-27 11:18:02 -07001220 l2x0_resume(collapsed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001221#endif
1222
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -06001223 msm_pm_boot_config_after_pc(smp_processor_id());
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001224
1225 if (collapsed) {
1226#ifdef CONFIG_VFP
1227 if (from_idle)
1228 vfp_reinit();
1229#endif
1230 cpu_init();
1231 local_fiq_enable();
1232 }
1233
1234 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND | MSM_PM_DEBUG_POWER_COLLAPSE,
1235 KERN_INFO,
1236 "%s(): msm_pm_collapse returned %d\n", __func__, collapsed);
1237
1238 MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
1239 "%s(): restore clock rate to %lu\n", __func__,
1240 saved_acpuclk_rate);
1241 if (acpuclk_set_rate(smp_processor_id(), saved_acpuclk_rate,
1242 SETRATE_PC) < 0)
1243 printk(KERN_ERR "%s(): failed to restore clock rate(%lu)\n",
1244 __func__, saved_acpuclk_rate);
1245
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301246 msm_pm_irq_extns->exit_sleep1(msm_pm_smem_data->irq_mask,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001247 msm_pm_smem_data->wakeup_reason,
1248 msm_pm_smem_data->pending_irqs);
1249
1250 msm_pm_config_hw_after_power_up();
1251 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): post power up");
1252
1253 memset(state_grps, 0, sizeof(state_grps));
1254 state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
1255 state_grps[0].bits_any_set =
1256 DEM_MASTER_SMSM_RSA | DEM_MASTER_SMSM_PWRC_EARLY_EXIT;
1257 state_grps[1].group_id = SMSM_MODEM_STATE;
1258 state_grps[1].bits_all_set = SMSM_RESET;
1259
1260 ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
1261
1262 if (ret < 0) {
1263 printk(KERN_EMERG "%s(): power collapse exit "
1264 "timed out waiting for Modem's response\n", __func__);
1265 msm_pm_timeout();
1266 }
1267
1268 if (ret == 1) {
1269 MSM_PM_DPRINTK(
1270 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
1271 KERN_INFO,
1272 "%s(): msm_pm_poll_state detected Modem reset\n",
1273 __func__);
1274 goto power_collapse_early_exit;
1275 }
1276
1277 /* Sanity check */
Murali Nalajala07b04022012-04-10 16:00:49 +05301278 if (collapsed && !modem_early_exit) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001279 BUG_ON(!(state_grps[0].value_read & DEM_MASTER_SMSM_RSA));
1280 } else {
1281 BUG_ON(!(state_grps[0].value_read &
1282 DEM_MASTER_SMSM_PWRC_EARLY_EXIT));
1283 goto power_collapse_early_exit;
1284 }
1285
1286 /* Enter WFPI */
1287
1288 smsm_change_state(SMSM_APPS_DEM,
1289 DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND,
1290 DEM_SLAVE_SMSM_WFPI);
1291
1292 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): WFPI");
1293
1294 memset(state_grps, 0, sizeof(state_grps));
1295 state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
1296 state_grps[0].bits_all_set = DEM_MASTER_SMSM_RUN;
1297 state_grps[1].group_id = SMSM_MODEM_STATE;
1298 state_grps[1].bits_all_set = SMSM_RESET;
1299
1300 ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
1301
1302 if (ret < 0) {
1303 printk(KERN_EMERG "%s(): power collapse WFPI "
1304 "timed out waiting for Modem's response\n", __func__);
1305 msm_pm_timeout();
1306 }
1307
1308 if (ret == 1) {
1309 MSM_PM_DPRINTK(
1310 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
1311 KERN_INFO,
1312 "%s(): msm_pm_poll_state detected Modem reset\n",
1313 __func__);
1314 ret = -EAGAIN;
1315 goto power_collapse_restore_gpio_bail;
1316 }
1317
1318 /* DEM Master == RUN */
1319
1320 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): WFPI RUN");
1321 MSM_PM_DEBUG_PRINT_SLEEP_INFO();
1322
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301323 msm_pm_irq_extns->exit_sleep2(msm_pm_smem_data->irq_mask,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001324 msm_pm_smem_data->wakeup_reason,
1325 msm_pm_smem_data->pending_irqs);
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301326 msm_pm_irq_extns->exit_sleep3(msm_pm_smem_data->irq_mask,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001327 msm_pm_smem_data->wakeup_reason,
1328 msm_pm_smem_data->pending_irqs);
1329 msm_gpio_exit_sleep();
1330 msm_sirc_exit_sleep();
1331
1332 smsm_change_state(SMSM_APPS_DEM,
1333 DEM_SLAVE_SMSM_WFPI, DEM_SLAVE_SMSM_RUN);
1334
1335 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): RUN");
1336
1337 smd_sleep_exit();
Murali Nalajala41786ab2012-03-06 10:47:32 +05301338
1339 if (cpu_is_msm8625()) {
1340 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING,
1341 false);
1342 WARN_ON(ret);
1343 }
1344
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001345 return 0;
1346
1347power_collapse_early_exit:
1348 /* Enter PWRC_EARLY_EXIT */
1349
1350 smsm_change_state(SMSM_APPS_DEM,
1351 DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND,
1352 DEM_SLAVE_SMSM_PWRC_EARLY_EXIT);
1353
1354 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): EARLY_EXIT");
1355
1356 memset(state_grps, 0, sizeof(state_grps));
1357 state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
1358 state_grps[0].bits_all_set = DEM_MASTER_SMSM_PWRC_EARLY_EXIT;
1359 state_grps[1].group_id = SMSM_MODEM_STATE;
1360 state_grps[1].bits_all_set = SMSM_RESET;
1361
1362 ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
1363 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): EARLY_EXIT EE");
1364
1365 if (ret < 0) {
1366 printk(KERN_EMERG "%s(): power collapse EARLY_EXIT "
1367 "timed out waiting for Modem's response\n", __func__);
1368 msm_pm_timeout();
1369 }
1370
1371 if (ret == 1) {
1372 MSM_PM_DPRINTK(
1373 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
1374 KERN_INFO,
1375 "%s(): msm_pm_poll_state detected Modem reset\n",
1376 __func__);
1377 }
1378
1379 /* DEM Master == RESET or PWRC_EARLY_EXIT */
1380
1381 ret = -EAGAIN;
1382
1383power_collapse_restore_gpio_bail:
1384 msm_gpio_exit_sleep();
1385 msm_sirc_exit_sleep();
1386
1387 /* Enter RUN */
1388 smsm_change_state(SMSM_APPS_DEM,
1389 DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND |
1390 DEM_SLAVE_SMSM_PWRC_EARLY_EXIT, DEM_SLAVE_SMSM_RUN);
1391
1392 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): RUN");
1393
1394 if (collapsed)
1395 smd_sleep_exit();
1396
1397power_collapse_bail:
Murali Nalajala41786ab2012-03-06 10:47:32 +05301398 if (cpu_is_msm8625()) {
1399 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING,
1400 false);
1401 WARN_ON(ret);
1402 }
1403
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001404 return ret;
1405}
1406
/*
 * Power collapse the Apps processor without involving Modem.
 *
 * Return value:
 *	0: success (power was actually lost and the CPU warm-booted)
 *	non-zero: the collapse did not complete (no power loss)
 */
static int __ref msm_pm_power_collapse_standalone(bool from_idle)
{
	int collapsed = 0;
	int ret;
	void *entry;

	MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
		KERN_INFO, "%s()\n", __func__);

	/* Arm the SPM for power collapse before going down. */
	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_POWER_COLLAPSE, false);
	WARN_ON(ret);

	/*
	 * Warm-boot entry point: CPU0 (and any CPU collapsing from idle)
	 * resumes via msm_pm_collapse_exit; a secondary CPU collapsed from
	 * the hotplug path restarts through msm_secondary_startup.
	 */
	entry = (!smp_processor_id() || from_idle) ?
		msm_pm_collapse_exit : msm_secondary_startup;

	msm_pm_boot_config_before_pc(smp_processor_id(),
			virt_to_phys(entry));

#ifdef CONFIG_VFP
	vfp_flush_context();
#endif

#ifdef CONFIG_CACHE_L2X0
	/* 8625's L2 stays up across a standalone collapse; skip suspend. */
	if (!cpu_is_msm8625())
		l2x0_suspend();
#endif

	/* Non-zero return means power was really lost. */
	collapsed = msm_pm_collapse();

#ifdef CONFIG_CACHE_L2X0
	if (!cpu_is_msm8625())
		l2x0_resume(collapsed);
#endif

	msm_pm_boot_config_after_pc(smp_processor_id());

	if (collapsed) {
#ifdef CONFIG_VFP
		vfp_reinit();
#endif
		cpu_init();
		local_fiq_enable();
	}

	MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND | MSM_PM_DEBUG_POWER_COLLAPSE,
		KERN_INFO,
		"%s(): msm_pm_collapse returned %d\n", __func__, collapsed);

	/* Back to normal clock gating. */
	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
	WARN_ON(ret);

	return !collapsed;
}
1466
1467/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001468 * Bring the Apps processor to SWFI.
1469 *
1470 * Return value:
1471 * -EIO: could not ramp Apps processor clock
1472 * 0: success
1473 */
1474static int msm_pm_swfi(bool ramp_acpu)
1475{
1476 unsigned long saved_acpuclk_rate = 0;
1477
1478 if (ramp_acpu) {
1479 saved_acpuclk_rate = acpuclk_wait_for_irq();
1480 MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
1481 "%s(): change clock rate (old rate = %lu)\n", __func__,
1482 saved_acpuclk_rate);
1483
1484 if (!saved_acpuclk_rate)
1485 return -EIO;
1486 }
1487
Murali Nalajala41786ab2012-03-06 10:47:32 +05301488 if (!cpu_is_msm8625())
1489 msm_pm_config_hw_before_swfi();
1490
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001491 msm_arch_idle();
1492
1493 if (ramp_acpu) {
1494 MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
1495 "%s(): restore clock rate to %lu\n", __func__,
1496 saved_acpuclk_rate);
1497 if (acpuclk_set_rate(smp_processor_id(), saved_acpuclk_rate,
1498 SETRATE_SWFI) < 0)
1499 printk(KERN_ERR
1500 "%s(): failed to restore clock rate(%lu)\n",
1501 __func__, saved_acpuclk_rate);
1502 }
1503
1504 return 0;
1505}
1506
1507
1508/******************************************************************************
1509 * External Idle/Suspend Functions
1510 *****************************************************************************/
1511
/*
 * Put CPU in low power mode.
 *
 * Called from the idle loop.  Chooses the deepest sleep mode permitted
 * by the per-mode platform data, the PM QoS latency bound, the time
 * until the next timer expiry and (for modem-assisted collapse) the
 * modem's readiness, then executes it.
 */
void arch_idle(void)
{
	bool allow[MSM_PM_SLEEP_MODE_NR];
	uint32_t sleep_limit = SLEEP_LIMIT_NONE;

	int64_t timer_expiration;
	int latency_qos;
	int ret;
	int i;
	unsigned int cpu;

#ifdef CONFIG_MSM_IDLE_STATS
	int64_t t1;
	/* Per-cpu exit timestamp of the previous idle period, used to
	 * account the intervening busy (NOT_IDLE) time. */
	static DEFINE_PER_CPU(int64_t, t2);
	int exit_stat;
#endif

	/* PM not initialized yet: just return and busy-idle. */
	if (!atomic_read(&msm_pm_init_done))
		return;

	cpu = smp_processor_id();

	latency_qos = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	/* get the next timer expiration */
	timer_expiration = ktime_to_ns(tick_nohz_get_sleep_length());

#ifdef CONFIG_MSM_IDLE_STATS
	t1 = ktime_to_ns(ktime_get());
	msm_pm_add_stat(MSM_PM_STAT_NOT_IDLE, t1 - __get_cpu_var(t2));
	msm_pm_add_stat(MSM_PM_STAT_REQUESTED_IDLE, timer_expiration);
	exit_stat = MSM_PM_STAT_IDLE_SPIN;
#endif

	for (i = 0; i < ARRAY_SIZE(allow); i++)
		allow[i] = true;

	/*
	 * Modem-assisted power collapse is only possible when this is the
	 * sole online CPU, the sleep is long enough, no idle wakelock is
	 * held and the interrupt layer permits it.
	 */
	if (num_online_cpus() > 1 ||
		(timer_expiration < msm_pm_idle_sleep_min_time) ||
#ifdef CONFIG_HAS_WAKELOCK
		has_wake_lock(WAKE_LOCK_IDLE) ||
#endif
		!msm_pm_irq_extns->idle_sleep_allowed()) {
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] = false;
	}

	/*
	 * Disallow any mode that is unsupported/disabled on this cpu, whose
	 * worst-case latency exceeds the QoS bound, or whose break-even
	 * residency exceeds the time until the next timer.
	 */
	for (i = 0; i < ARRAY_SIZE(allow); i++) {
		struct msm_pm_platform_data *mode =
			&msm_pm_modes[MSM_PM_MODE(cpu, i)];
		if (!mode->idle_supported || !mode->idle_enabled ||
			mode->latency >= latency_qos ||
			mode->residency * 1000ULL >= timer_expiration)
			allow[i] = false;
	}

	/* Give the modem a bounded amount of time to become ready. */
	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
		uint32_t wait_us = CONFIG_MSM_IDLE_WAIT_ON_MODEM;
		while (msm_pm_modem_busy() && wait_us) {
			if (wait_us > 100) {
				udelay(100);
				wait_us -= 100;
			} else {
				udelay(wait_us);
				wait_us = 0;
			}
		}

		if (msm_pm_modem_busy()) {
			allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
			allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]
				= false;
		}
	}

	MSM_PM_DPRINTK(MSM_PM_DEBUG_IDLE, KERN_INFO,
		"%s(): latency qos %d, next timer %lld, sleep limit %u\n",
		__func__, latency_qos, timer_expiration, sleep_limit);

	for (i = 0; i < ARRAY_SIZE(allow); i++)
		MSM_PM_DPRINTK(MSM_PM_DEBUG_IDLE, KERN_INFO,
			"%s(): allow %s: %d\n", __func__,
			msm_pm_sleep_mode_labels[i], (int)allow[i]);

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
		/* Sync the timer with SCLK; it is needed only for the modem
		 * assisted power collapse case.
		 */
		int64_t next_timer_exp = msm_timer_enter_idle();
		uint32_t sleep_delay;
		bool low_power = false;

		sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
			next_timer_exp, MSM_PM_SLEEP_TICK_LIMIT);

		if (sleep_delay == 0) /* 0 would mean infinite time */
			sleep_delay = 1;

		if (!allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
			sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;

#if defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_IDLE_ACTIVE)
		sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT1;
#elif defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_IDLE_RETENTION)
		sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT0;
#endif

		ret = msm_pm_power_collapse(true, sleep_delay, sleep_limit);
		/* -EBUSY/-ETIMEDOUT mean no power was actually lost. */
		low_power = (ret != -EBUSY && ret != -ETIMEDOUT);
		msm_timer_exit_idle(low_power);

#ifdef CONFIG_MSM_IDLE_STATS
		if (ret)
			exit_stat = MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE;
		else {
			exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
			msm_pm_sleep_limit = sleep_limit;
		}
#endif
	} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
		ret = msm_pm_power_collapse_standalone(true);
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = ret ?
			MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE :
			MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
#endif
	} else if (allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT]) {
		ret = msm_pm_swfi(true);
		/* Clock ramp failed: spin until an interrupt is pending. */
		if (ret)
			while (!msm_pm_irq_extns->irq_pending())
				udelay(1);
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = ret ? MSM_PM_STAT_IDLE_SPIN : MSM_PM_STAT_IDLE_WFI;
#endif
	} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
		msm_pm_swfi(false);
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_WFI;
#endif
	} else {
		/* Nothing allowed: busy-wait for a pending interrupt. */
		while (!msm_pm_irq_extns->irq_pending())
			udelay(1);
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_SPIN;
#endif
	}

#ifdef CONFIG_MSM_IDLE_STATS
	__get_cpu_var(t2) = ktime_to_ns(ktime_get());
	msm_pm_add_stat(exit_stat, __get_cpu_var(t2) - t1);
#endif
}
1668
/*
 * Suspend the Apps processor.
 *
 * Return value:
 *	-EPERM: Suspend happened by a not permitted core
 *	-EAGAIN: modem reset occurred or early exit from suspend
 *	-EBUSY: modem not ready for our suspend
 *	-EINVAL: invalid sleep mode
 *	-EIO: could not ramp Apps processor clock
 *	-ETIMEDOUT: timed out waiting for modem's handshake
 *	0: success
 */
static int msm_pm_enter(suspend_state_t state)
{
	bool allow[MSM_PM_SLEEP_MODE_NR];
	uint32_t sleep_limit = SLEEP_LIMIT_NONE;
	int ret = -EPERM;
	int i;
#ifdef CONFIG_MSM_IDLE_STATS
	int64_t period = 0;
	int64_t time = 0;
#endif

	/* Must be executed by CORE0 */
	if (smp_processor_id()) {
		__WARN();
		goto suspend_exit;
	}

#ifdef CONFIG_MSM_IDLE_STATS
	/* Snapshot the slow clock to measure total suspend duration. */
	time = msm_timer_get_sclk_time(&period);
#endif

	MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND, KERN_INFO,
		"%s(): sleep limit %u\n", __func__, sleep_limit);

	for (i = 0; i < ARRAY_SIZE(allow); i++)
		allow[i] = true;

	/* Only modes enabled for suspend on CPU0 are candidates. */
	for (i = 0; i < ARRAY_SIZE(allow); i++) {
		struct msm_pm_platform_data *mode;
		mode = &msm_pm_modes[MSM_PM_MODE(0, i)];
		if (!mode->suspend_supported || !mode->suspend_enabled)
			allow[i] = false;
	}

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
#ifdef CONFIG_MSM_IDLE_STATS
		enum msm_pm_time_stats_id id;
		int64_t end_time;
#endif

		clock_debug_print_enabled();

#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
		/* One-shot debug override of the maximum sleep time. */
		if (msm_pm_sleep_time_override > 0) {
			int64_t ns;
			ns = NSEC_PER_SEC * (int64_t)msm_pm_sleep_time_override;
			msm_pm_set_max_sleep_time(ns);
			msm_pm_sleep_time_override = 0;
		}
#endif
		if (!allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
			sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;

#if defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_ACTIVE)
		sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT1;
#elif defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_RETENTION)
		sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT0;
#elif defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_DEEP_POWER_DOWN)
		if (get_msm_migrate_pages_status() != MEM_OFFLINE)
			sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT0;
#endif

		/* Give the modem up to 15 ms to become ready. */
		for (i = 0; i < 30 && msm_pm_modem_busy(); i++)
			udelay(500);

		ret = msm_pm_power_collapse(
			false, msm_pm_max_sleep_time, sleep_limit);

#ifdef CONFIG_MSM_IDLE_STATS
		if (ret)
			id = MSM_PM_STAT_FAILED_SUSPEND;
		else {
			id = MSM_PM_STAT_SUSPEND;
			msm_pm_sleep_limit = sleep_limit;
		}

		/* Compute elapsed sclk time, compensating for wrap-around. */
		if (time != 0) {
			end_time = msm_timer_get_sclk_time(NULL);
			if (end_time != 0) {
				time = end_time - time;
				if (time < 0)
					time += period;
			} else
				time = 0;
		}

		msm_pm_add_stat(id, time);
#endif
	} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
		ret = msm_pm_power_collapse_standalone(false);
	} else if (allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT]) {
		ret = msm_pm_swfi(true);
		/* Clock ramp failed: spin until an interrupt is pending. */
		if (ret)
			while (!msm_pm_irq_extns->irq_pending())
				udelay(1);
	} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
		msm_pm_swfi(false);
	}

suspend_exit:
	MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND, KERN_INFO,
		"%s(): return %d\n", __func__, ret);

	return ret;
}
1787
/* Suspend callbacks registered with the suspend core ("mem" state only). */
static struct platform_suspend_ops msm_pm_ops = {
	.enter = msm_pm_enter,
	.valid = suspend_valid_only_mem,
};
1792
Murali Nalajalac89f2f32012-02-07 19:23:52 +05301793/* Hotplug the "non boot" CPU's and put
1794 * the cores into low power mode
1795 */
1796void msm_pm_cpu_enter_lowpower(unsigned int cpu)
1797{
Murali Nalajalaa7efba12012-02-23 18:13:52 +05301798 bool allow[MSM_PM_SLEEP_MODE_NR];
1799 int i;
1800
1801 for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
1802 struct msm_pm_platform_data *mode;
1803
1804 mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];
1805 allow[i] = mode->suspend_supported && mode->suspend_enabled;
1806 }
1807
1808 MSM_PM_DPRINTK(MSM_PM_DEBUG_HOTPLUG, KERN_INFO,
1809 "CPU%u: %s: shutting down cpu\n", cpu, __func__);
1810
1811 if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
1812 msm_pm_power_collapse_standalone(false);
1813 } else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
1814 msm_pm_swfi(false);
1815 } else {
1816 MSM_PM_DPRINTK(MSM_PM_DEBUG_HOTPLUG, KERN_INFO,
1817 "CPU%u: %s: shutting down failed!!!\n", cpu, __func__);
1818 }
Murali Nalajalac89f2f32012-02-07 19:23:52 +05301819}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001820
1821/******************************************************************************
1822 * Restart Definitions
1823 *****************************************************************************/
1824
/* Restart reason handed to the modem via PCOM_RESET_CHIP in
 * msm_pm_restart(); the default 0x776655AA is replaced by
 * msm_reboot_call() according to the reboot command string.
 */
static uint32_t restart_reason = 0x776655AA;
1826
1827static void msm_pm_power_off(void)
1828{
1829 msm_rpcrouter_close();
1830 msm_proc_comm(PCOM_POWER_DOWN, 0, 0);
1831 for (;;)
1832 ;
1833}
1834
1835static void msm_pm_restart(char str, const char *cmd)
1836{
1837 msm_rpcrouter_close();
1838 msm_proc_comm(PCOM_RESET_CHIP, &restart_reason, 0);
1839
1840 for (;;)
1841 ;
1842}
1843
1844static int msm_reboot_call
1845 (struct notifier_block *this, unsigned long code, void *_cmd)
1846{
1847 if ((code == SYS_RESTART) && _cmd) {
1848 char *cmd = _cmd;
1849 if (!strcmp(cmd, "bootloader")) {
1850 restart_reason = 0x77665500;
1851 } else if (!strcmp(cmd, "recovery")) {
1852 restart_reason = 0x77665502;
1853 } else if (!strcmp(cmd, "eraseflash")) {
1854 restart_reason = 0x776655EF;
1855 } else if (!strncmp(cmd, "oem-", 4)) {
1856 unsigned code = simple_strtoul(cmd + 4, 0, 16) & 0xff;
1857 restart_reason = 0x6f656d00 | code;
1858 } else {
1859 restart_reason = 0x77665501;
1860 }
1861 }
1862 return NOTIFY_DONE;
1863}
1864
/* Registered in msm_pm_init() so msm_reboot_call() can capture the
 * reboot command string on SYS_RESTART.
 */
static struct notifier_block msm_reboot_notifier = {
	.notifier_call = msm_reboot_call,
};
1868
1869
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001870/*
1871 * Initialize the power management subsystem.
1872 *
1873 * Return value:
1874 * -ENODEV: initialization failed
1875 * 0: success
1876 */
static int __init msm_pm_init(void)
{
#ifdef CONFIG_MSM_IDLE_STATS
	struct proc_dir_entry *d_entry;
	unsigned int cpu;
#endif
	int ret;
	int val;
#ifdef CONFIG_CPU_V7
	pgd_t *pc_pgd;
	pmd_t *pmd;
	unsigned long pmdval;

	/* Page table for cores to come back up safely. */
	pc_pgd = pgd_alloc(&init_mm);
	if (!pc_pgd)
		return -ENOMEM;
	/* Section-map msm_pm_collapse_exit at its physical address so a
	 * waking core can turn the MMU back on while executing there.
	 */
	pmd = pmd_offset(pc_pgd +
			 pgd_index(virt_to_phys(msm_pm_collapse_exit)),
			 virt_to_phys(msm_pm_collapse_exit));
	pmdval = (virt_to_phys(msm_pm_collapse_exit) & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	pmd[0] = __pmd(pmdval);
	pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));

	/* Uncached per-CPU save area used across power collapse.
	 * NOTE(review): pc_pgd and msm_saved_state_phys are not released
	 * on the early -ENOMEM paths below -- likely acceptable for an
	 * initcall, but worth confirming.
	 */
	msm_saved_state_phys =
		allocate_contiguous_ebi_nomap(CPU_SAVED_STATE_SIZE *
					      num_possible_cpus(), 4);
	if (!msm_saved_state_phys)
		return -ENOMEM;
	msm_saved_state = ioremap_nocache(msm_saved_state_phys,
					  CPU_SAVED_STATE_SIZE *
					  num_possible_cpus());
	if (!msm_saved_state)
		return -ENOMEM;

	/* It is remotely possible that the code in msm_pm_collapse_exit()
	 * which turns on the MMU with this mapping is in the
	 * next even-numbered megabyte beyond the
	 * start of msm_pm_collapse_exit().
	 * Map this megabyte in as well.
	 */
	pmd[2] = __pmd(pmdval + (2 << (PGDIR_SHIFT - 1)));
	flush_pmd_entry(pmd);
	msm_pm_pc_pgd = virt_to_phys(pc_pgd);
	/* Clean the pgd pointer to memory so a freshly-woken core (caches
	 * not yet enabled) reads the correct value.
	 */
	clean_caches((unsigned long)&msm_pm_pc_pgd, sizeof(msm_pm_pc_pgd),
		virt_to_phys(&msm_pm_pc_pgd));
#endif

	/* Hook machine-level power off / restart and the reboot-command
	 * notifier that fills in restart_reason.
	 */
	pm_power_off = msm_pm_power_off;
	arm_pm_restart = msm_pm_restart;
	register_reboot_notifier(&msm_reboot_notifier);

	/* Shared-memory block used to exchange sleep parameters with the
	 * modem (power master).
	 */
	msm_pm_smem_data = smem_alloc(SMEM_APPS_DEM_SLAVE_DATA,
		sizeof(*msm_pm_smem_data));
	if (msm_pm_smem_data == NULL) {
		printk(KERN_ERR "%s: failed to get smsm_data\n", __func__);
		return -ENODEV;
	}

	ret = msm_timer_init_time_sync(msm_pm_timeout);
	if (ret)
		return ret;

	ret = smsm_change_intr_mask(SMSM_POWER_MASTER_DEM, 0xFFFFFFFF, 0);
	if (ret) {
		printk(KERN_ERR "%s: failed to clear interrupt mask, %d\n",
			__func__, ret);
		return ret;
	}

	if (cpu_is_msm8625()) {
		target_type = TARGET_IS_8625;
		clean_caches((unsigned long)&target_type, sizeof(target_type),
			virt_to_phys(&target_type));

		/* Override the DBGNOPOWERDN for each cpu in
		 * MPA5_GDFS_CNT_VAL register
		 */
		val = __raw_readl((MSM_CFG_CTL_BASE + 0x38));
		val = val | 0x00030000;
		__raw_writel(val, (MSM_CFG_CTL_BASE + 0x38));
	}

#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE
	/* The wakeup_reason field is overloaded during initialization time
	   to signal Modem that Apps will control the low power modes of
	   the memory.
	 */
	msm_pm_smem_data->wakeup_reason = 1;
	smsm_change_state(SMSM_APPS_DEM, 0, DEM_SLAVE_SMSM_RUN);
#endif

	BUG_ON(msm_pm_modes == NULL);

	suspend_set_ops(&msm_pm_ops);

	msm_pm_mode_sysfs_add();
#ifdef CONFIG_MSM_IDLE_STATS
	/* Name the per-CPU sleep-time statistics buckets exposed through
	 * /proc/msm_pm_stats.
	 */
	for_each_possible_cpu(cpu) {
		struct msm_pm_time_stats *stats =
			per_cpu(msm_pm_stats, cpu).stats;

		stats[MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request";
		stats[MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_SPIN].name = "idle-spin";
		stats[MSM_PM_STAT_IDLE_SPIN].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_WFI].name = "idle-wfi";
		stats[MSM_PM_STAT_IDLE_WFI].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].name =
			"idle-standalone-power-collapse";
		stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].
			first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE].name =
			"idle-failed-standalone-power-collapse";
		stats[MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE].
			first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].name =
			"idle-power-collapse";
		stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].name =
			"idle-failed-power-collapse";
		stats[MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].
			first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_SUSPEND].name = "suspend";
		stats[MSM_PM_STAT_SUSPEND].first_bucket_time =
			CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_FAILED_SUSPEND].name = "failed-suspend";
		stats[MSM_PM_STAT_FAILED_SUSPEND].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_NOT_IDLE].name = "not-idle";
		stats[MSM_PM_STAT_NOT_IDLE].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
	}

	/* Mark stats initialized before exposing the proc interface. */
	atomic_set(&msm_pm_init_done, 1);

	d_entry = create_proc_entry("msm_pm_stats",
			S_IRUGO | S_IWUSR | S_IWGRP, NULL);
	if (d_entry) {
		d_entry->read_proc = msm_pm_read_proc;
		d_entry->write_proc = msm_pm_write_proc;
		d_entry->data = NULL;
	}
#endif

	return 0;
}
2041
/* Late initcall: msm_pm_init() uses smem_alloc() and the timer sync
 * services, which are presumably brought up by earlier initcalls.
 */
late_initcall_sync(msm_pm_init);