blob: 8db21f9930a11eb974d403c9e2b1dd100981f7ed [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/completion.h>
18#include <linux/cpuidle.h>
19#include <linux/interrupt.h>
20#include <linux/io.h>
21#include <linux/ktime.h>
22#include <linux/pm.h>
23#include <linux/pm_qos_params.h>
24#include <linux/proc_fs.h>
25#include <linux/smp.h>
26#include <linux/suspend.h>
27#include <linux/tick.h>
28#include <linux/uaccess.h>
29#include <linux/wakelock.h>
30#include <mach/msm_iomap.h>
31#include <mach/system.h>
32#include <asm/cacheflush.h>
33#include <asm/hardware/gic.h>
34#include <asm/pgtable.h>
35#include <asm/pgalloc.h>
36#ifdef CONFIG_VFP
37#include <asm/vfp.h>
38#endif
39
40#include "acpuclock.h"
41#include "clock.h"
42#include "avs.h"
43#include "cpuidle.h"
44#include "idle.h"
45#include "pm.h"
46#include "rpm_resources.h"
47#include "scm-boot.h"
48#include "spm.h"
49#include "timer.h"
Pratik Patele5771792011-09-17 18:33:54 -070050#include "qdss.h"
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -060051#include "pm-boot.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070052
53/******************************************************************************
54 * Debug Definitions
55 *****************************************************************************/
56
/* Bit-mask values selecting which classes of debug output are printed. */
enum {
	MSM_PM_DEBUG_SUSPEND = BIT(0),
	MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
	MSM_PM_DEBUG_SUSPEND_LIMITS = BIT(2),
	MSM_PM_DEBUG_CLOCK = BIT(3),
	MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
	/* note: BIT(5) is skipped */
	MSM_PM_DEBUG_IDLE = BIT(6),
	MSM_PM_DEBUG_IDLE_LIMITS = BIT(7),
	MSM_PM_DEBUG_HOTPLUG = BIT(8),
};

/*
 * Runtime-tunable debug mask, exposed as module parameter "debug_mask".
 * Defaults to 1 (MSM_PM_DEBUG_SUSPEND only).
 */
static int msm_pm_debug_mask = 1;
module_param_named(
	debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);
72
73
74/******************************************************************************
75 * Sleep Modes and Parameters
76 *****************************************************************************/
77
/* Per-(cpu, sleep mode) platform data table, indexed via MSM_PM_MODE(). */
static struct msm_pm_platform_data *msm_pm_modes;
/* IRQ the RPM uses to wake CPU0. */
static int rpm_cpu0_wakeup_irq;

/*
 * Install the sleep-mode table.  @count must cover every (cpu, mode)
 * pair, i.e. at least MSM_PM_SLEEP_MODE_NR * num_possible_cpus().
 */
void __init msm_pm_set_platform_data(
	struct msm_pm_platform_data *data, int count)
{
	BUG_ON(MSM_PM_SLEEP_MODE_NR * num_possible_cpus() > count);
	msm_pm_modes = data;
}

/* Record the RPM wakeup IRQ number for CPU0. */
void __init msm_pm_set_rpm_wakeup_irq(unsigned int irq)
{
	rpm_cpu0_wakeup_irq = irq;
}
92
/* Indices of the per-mode sysfs attributes. */
enum {
	MSM_PM_MODE_ATTR_SUSPEND,
	MSM_PM_MODE_ATTR_IDLE,
	MSM_PM_MODE_ATTR_NR,
};

/* sysfs attribute file names, indexed by the enum above. */
static char *msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_NR] = {
	[MSM_PM_MODE_ATTR_SUSPEND] = "suspend_enabled",
	[MSM_PM_MODE_ATTR_IDLE] = "idle_enabled",
};

/* kobj_attribute extended with the owning CPU number. */
struct msm_pm_kobj_attribute {
	unsigned int cpu;
	struct kobj_attribute ka;
};

/* Recover the CPU number from an embedded kobj_attribute. */
#define GET_CPU_OF_ATTR(attr) \
	(container_of(attr, struct msm_pm_kobj_attribute, ka)->cpu)

/* One sysfs directory per (cpu, sleep mode), with its attributes. */
struct msm_pm_sysfs_sleep_mode {
	struct kobject *kobj;
	struct attribute_group attr_group;
	/* attrs[] is NULL-terminated, hence the +1 */
	struct attribute *attrs[MSM_PM_MODE_ATTR_NR + 1];
	struct msm_pm_kobj_attribute kas[MSM_PM_MODE_ATTR_NR];
};

/* sysfs directory names per sleep mode; NULL entries are skipped. */
static char *msm_pm_sleep_mode_labels[MSM_PM_SLEEP_MODE_NR] = {
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = "power_collapse",
	[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = "wfi",
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
		"standalone_power_collapse",
};
125
126/*
127 * Write out the attribute.
128 */
129static ssize_t msm_pm_mode_attr_show(
130 struct kobject *kobj, struct kobj_attribute *attr, char *buf)
131{
132 int ret = -EINVAL;
133 int i;
134
135 for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
136 struct kernel_param kp;
137 unsigned int cpu;
138 struct msm_pm_platform_data *mode;
139
140 if (msm_pm_sleep_mode_labels[i] == NULL)
141 continue;
142
143 if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
144 continue;
145
146 cpu = GET_CPU_OF_ATTR(attr);
147 mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];
148
149 if (!strcmp(attr->attr.name,
150 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
151 u32 arg = mode->suspend_enabled;
152 kp.arg = &arg;
153 ret = param_get_ulong(buf, &kp);
154 } else if (!strcmp(attr->attr.name,
155 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
156 u32 arg = mode->idle_enabled;
157 kp.arg = &arg;
158 ret = param_get_ulong(buf, &kp);
159 }
160
161 break;
162 }
163
164 if (ret > 0) {
Praveen Chidambaram2b0fdd02011-10-28 16:40:58 -0600165 strlcat(buf, "\n", PAGE_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700166 ret++;
167 }
168
169 return ret;
170}
171
/*
 * sysfs store handler for "suspend_enabled"/"idle_enabled": locate the
 * sleep mode by kobject name and the CPU from the attribute wrapper,
 * then parse the new value into the matching flag.
 *
 * Returns @count on success, otherwise a negative errno.
 */
static ssize_t msm_pm_mode_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = -EINVAL;
	int i;

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct kernel_param kp;
		unsigned int cpu;
		struct msm_pm_platform_data *mode;

		if (msm_pm_sleep_mode_labels[i] == NULL)
			continue;

		if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
			continue;

		cpu = GET_CPU_OF_ATTR(attr);
		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];

		if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
			/*
			 * param_set_byte writes one byte through kp.arg —
			 * assumes suspend_enabled/idle_enabled are
			 * byte-sized; confirm against the declaration of
			 * struct msm_pm_platform_data.
			 */
			kp.arg = &mode->suspend_enabled;
			ret = param_set_byte(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
			kp.arg = &mode->idle_enabled;
			ret = param_set_byte(buf, &kp);
		}

		break;
	}

	/* param_set_byte returns 0 on success: consume all input then. */
	return ret ? ret : count;
}
210
211/*
212 * Add sysfs entries for one cpu.
213 */
214static int __init msm_pm_mode_sysfs_add_cpu(
215 unsigned int cpu, struct kobject *modes_kobj)
216{
217 char cpu_name[8];
218 struct kobject *cpu_kobj;
Praveen Chidambaram2b0fdd02011-10-28 16:40:58 -0600219 struct msm_pm_sysfs_sleep_mode *mode = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700220 int i, j, k;
221 int ret;
222
223 snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
224 cpu_kobj = kobject_create_and_add(cpu_name, modes_kobj);
225 if (!cpu_kobj) {
226 pr_err("%s: cannot create %s kobject\n", __func__, cpu_name);
227 ret = -ENOMEM;
228 goto mode_sysfs_add_cpu_exit;
229 }
230
231 for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
232 int idx = MSM_PM_MODE(cpu, i);
233
234 if ((!msm_pm_modes[idx].suspend_supported)
235 && (!msm_pm_modes[idx].idle_supported))
236 continue;
237
238 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
239 if (!mode) {
240 pr_err("%s: cannot allocate memory for attributes\n",
241 __func__);
242 ret = -ENOMEM;
243 goto mode_sysfs_add_cpu_exit;
244 }
245
246 mode->kobj = kobject_create_and_add(
247 msm_pm_sleep_mode_labels[i], cpu_kobj);
248 if (!mode->kobj) {
249 pr_err("%s: cannot create kobject\n", __func__);
250 ret = -ENOMEM;
251 goto mode_sysfs_add_cpu_exit;
252 }
253
254 for (k = 0, j = 0; k < MSM_PM_MODE_ATTR_NR; k++) {
255 if ((k == MSM_PM_MODE_ATTR_IDLE) &&
256 !msm_pm_modes[idx].idle_supported)
257 continue;
258 if ((k == MSM_PM_MODE_ATTR_SUSPEND) &&
259 !msm_pm_modes[idx].suspend_supported)
260 continue;
261 mode->kas[j].cpu = cpu;
262 mode->kas[j].ka.attr.mode = 0644;
263 mode->kas[j].ka.show = msm_pm_mode_attr_show;
264 mode->kas[j].ka.store = msm_pm_mode_attr_store;
265 mode->kas[j].ka.attr.name = msm_pm_mode_attr_labels[k];
266 mode->attrs[j] = &mode->kas[j].ka.attr;
267 j++;
268 }
269 mode->attrs[j] = NULL;
270
271 mode->attr_group.attrs = mode->attrs;
272 ret = sysfs_create_group(mode->kobj, &mode->attr_group);
273 if (ret) {
274 pr_err("%s: cannot create kobject attribute group\n",
275 __func__);
276 goto mode_sysfs_add_cpu_exit;
277 }
278 }
279
280 ret = 0;
281
282mode_sysfs_add_cpu_exit:
Praveen Chidambaramd5ac2d32011-10-24 14:30:27 -0600283 if (ret) {
Praveen Chidambaram2cfda632011-10-11 16:58:09 -0600284 if (mode && mode->kobj)
285 kobject_del(mode->kobj);
286 kfree(mode);
287 }
288
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700289 return ret;
290}
291
292/*
293 * Add sysfs entries for the sleep modes.
294 */
295static int __init msm_pm_mode_sysfs_add(void)
296{
297 struct kobject *module_kobj;
298 struct kobject *modes_kobj;
299 unsigned int cpu;
300 int ret;
301
302 module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
303 if (!module_kobj) {
304 pr_err("%s: cannot find kobject for module %s\n",
305 __func__, KBUILD_MODNAME);
306 ret = -ENOENT;
307 goto mode_sysfs_add_exit;
308 }
309
310 modes_kobj = kobject_create_and_add("modes", module_kobj);
311 if (!modes_kobj) {
312 pr_err("%s: cannot create modes kobject\n", __func__);
313 ret = -ENOMEM;
314 goto mode_sysfs_add_exit;
315 }
316
317 for_each_possible_cpu(cpu) {
318 ret = msm_pm_mode_sysfs_add_cpu(cpu, modes_kobj);
319 if (ret)
320 goto mode_sysfs_add_exit;
321 }
322
323 ret = 0;
324
325mode_sysfs_add_exit:
326 return ret;
327}
328
329/******************************************************************************
330 * CONFIG_MSM_IDLE_STATS
331 *****************************************************************************/
332
333#ifdef CONFIG_MSM_IDLE_STATS
/* IDs of the statistics tracked per CPU. */
enum msm_pm_time_stats_id {
	MSM_PM_STAT_REQUESTED_IDLE,
	MSM_PM_STAT_IDLE_WFI,
	MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
	MSM_PM_STAT_IDLE_POWER_COLLAPSE,
	MSM_PM_STAT_SUSPEND,
	MSM_PM_STAT_COUNT
};

/* Histogram of sleep durations for one statistic. */
struct msm_pm_time_stats {
	const char *name;
	int64_t first_bucket_time;	/* bucket 0 upper bound, in ns */
	int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int count;			/* total samples */
	int64_t total_time;		/* sum of samples, in ns */
};

struct msm_pm_cpu_time_stats {
	struct msm_pm_time_stats stats[MSM_PM_STAT_COUNT];
};

/* Protects msm_pm_stats of every CPU. */
static DEFINE_SPINLOCK(msm_pm_stats_lock);
static DEFINE_PER_CPU_SHARED_ALIGNED(
	struct msm_pm_cpu_time_stats, msm_pm_stats);
360
/*
 * Fold one sleep interval into this CPU's statistics.
 * @id: which statistic the sample belongs to
 * @t:  interval length in nanoseconds
 */
static void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t)
{
	unsigned long flags;
	struct msm_pm_time_stats *stats;
	int64_t bt;
	int i;

	spin_lock_irqsave(&msm_pm_stats_lock, flags);
	stats = __get_cpu_var(msm_pm_stats).stats;

	stats[id].total_time += t;
	stats[id].count++;

	/*
	 * Bucket index is roughly log base 2^BUCKET_SHIFT of
	 * (t / first_bucket_time); everything beyond the last boundary
	 * lands in the final, open-ended bucket.
	 */
	bt = t;
	do_div(bt, stats[id].first_bucket_time);

	if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
		(CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
		i = DIV_ROUND_UP(fls((uint32_t)bt),
			CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
	else
		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;

	/* Clamp in case the fls()-based rounding overshoots. */
	if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;

	stats[id].bucket[i]++;

	/* max_time[i] == 0 doubles as "no sample yet in this bucket". */
	if (t < stats[id].min_time[i] || !stats[id].max_time[i])
		stats[id].min_time[i] = t;
	if (t > stats[id].max_time[i])
		stats[id].max_time[i] = t;

	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
}
399
/*
 * snprintf helper that advances @buf and shrinks @size in place and
 * discards the return value.  Once the buffer is exhausted (@size hits
 * zero) further invocations are no-ops.
 *
 * NOTE: buf and size must be l-values (e.g. variables).
 */
#define SNPRINTF(buf, size, format, ...) \
	do { \
		if ((size) > 0) { \
			int _n; \
			_n = snprintf(buf, size, format, ## __VA_ARGS__); \
			if (_n > (size)) { \
				(buf) += (size); \
				(size) = 0; \
			} else { \
				(buf) += _n; \
				(size) -= _n; \
			} \
		} \
	} while (0)
420
/*
 * Legacy procfs read handler for the sleep statistics.  Each offset
 * step selects one (cpu, stat) pair: cpu = off / MSM_PM_STAT_COUNT,
 * id = off % MSM_PM_STAT_COUNT, so the whole file is produced in
 * MSM_PM_STAT_COUNT * num_possible_cpus() reads.
 */
static int msm_pm_read_proc
	(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	unsigned int cpu = off / MSM_PM_STAT_COUNT;
	int id = off % MSM_PM_STAT_COUNT;
	char *p = page;

	/* Demand a reasonably sized buffer before producing anything. */
	if (count < 1024) {
		*start = (char *) 0;
		*eof = 0;
		return 0;
	}

	if (cpu < num_possible_cpus()) {
		unsigned long flags;
		struct msm_pm_time_stats *stats;
		int i;
		int64_t bucket_time;
		int64_t s;
		uint32_t ns;

		spin_lock_irqsave(&msm_pm_stats_lock, flags);
		stats = per_cpu(msm_pm_stats, cpu).stats;

		/* Split total_time into seconds + nanoseconds. */
		s = stats[id].total_time;
		ns = do_div(s, NSEC_PER_SEC);
		SNPRINTF(p, count,
			"[cpu %u] %s:\n"
			" count: %7d\n"
			" total_time: %lld.%09u\n",
			cpu, stats[id].name,
			stats[id].count,
			s, ns);

		/* One line per bucket: "<bound: count (min-max)". */
		bucket_time = stats[id].first_bucket_time;
		for (i = 0; i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; i++) {
			s = bucket_time;
			ns = do_div(s, NSEC_PER_SEC);
			SNPRINTF(p, count,
				" <%6lld.%09u: %7d (%lld-%lld)\n",
				s, ns, stats[id].bucket[i],
				stats[id].min_time[i],
				stats[id].max_time[i]);

			bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
		}

		/*
		 * The last bucket is open-ended; s/ns deliberately still
		 * hold the previous bucket's upper bound, which is this
		 * bucket's lower bound.
		 */
		SNPRINTF(p, count, " >=%6lld.%09u: %7d (%lld-%lld)\n",
			s, ns, stats[id].bucket[i],
			stats[id].min_time[i],
			stats[id].max_time[i]);

		/* Non-NULL *start tells procfs to advance off by one. */
		*start = (char *) 1;
		*eof = (off + 1 >= MSM_PM_STAT_COUNT * num_possible_cpus());

		spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
	}

	return p - page;
}
484#undef SNPRINTF
485
486#define MSM_PM_STATS_RESET "reset"
487
/*
 * procfs write handler: writing the string "reset" clears the
 * statistics of every CPU.  Any other input yields -EINVAL; a short
 * read from userspace yields -EFAULT.
 */
static int msm_pm_write_proc(struct file *file, const char __user *buffer,
	unsigned long count, void *data)
{
	char buf[sizeof(MSM_PM_STATS_RESET)];
	int ret;
	unsigned long flags;
	unsigned int cpu;

	if (count < strlen(MSM_PM_STATS_RESET)) {
		ret = -EINVAL;
		goto write_proc_failed;
	}

	if (copy_from_user(buf, buffer, strlen(MSM_PM_STATS_RESET))) {
		ret = -EFAULT;
		goto write_proc_failed;
	}

	/* memcmp, not strcmp: buf is not guaranteed NUL-terminated. */
	if (memcmp(buf, MSM_PM_STATS_RESET, strlen(MSM_PM_STATS_RESET))) {
		ret = -EINVAL;
		goto write_proc_failed;
	}

	/* Zero every bucket/min/max/count/total under the stats lock. */
	spin_lock_irqsave(&msm_pm_stats_lock, flags);
	for_each_possible_cpu(cpu) {
		struct msm_pm_time_stats *stats;
		int i;

		stats = per_cpu(msm_pm_stats, cpu).stats;
		for (i = 0; i < MSM_PM_STAT_COUNT; i++) {
			memset(stats[i].bucket,
				0, sizeof(stats[i].bucket));
			memset(stats[i].min_time,
				0, sizeof(stats[i].min_time));
			memset(stats[i].max_time,
				0, sizeof(stats[i].max_time));
			stats[i].count = 0;
			stats[i].total_time = 0;
		}
	}

	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
	return count;

write_proc_failed:
	return ret;
}
538#undef MSM_PM_STATS_RESET
539#endif /* CONFIG_MSM_IDLE_STATS */
540
541
542/******************************************************************************
543 * Configure Hardware before/after Low Power Mode
544 *****************************************************************************/
545
/*
 * Configure hardware registers in preparation for Apps power down.
 * No-op on this target; kept as a hook for targets that need it.
 */
static void msm_pm_config_hw_before_power_down(void)
{
	return;
}
553
/*
 * Clear hardware registers after Apps powers up.
 * No-op on this target; kept as a hook for targets that need it.
 */
static void msm_pm_config_hw_after_power_up(void)
{
	return;
}
561
/*
 * Configure hardware registers in preparation for SWFI.
 * No-op on this target; kept as a hook for targets that need it.
 */
static void msm_pm_config_hw_before_swfi(void)
{
	return;
}
569
570
571/******************************************************************************
572 * Suspend Max Sleep Time
573 *****************************************************************************/
574
#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
/*
 * Debug knob: when > 0 (seconds), the next suspend uses it as the
 * maximum sleep time and then resets it to 0 (one-shot; consumed in
 * msm_pm_enter()).
 */
static int msm_pm_sleep_time_override;
module_param_named(sleep_time_override,
	msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
#endif

/* Slow clock frequency in Hz. */
#define SCLK_HZ (32768)
/* Cap applied to sleep durations, in slow-clock ticks. */
#define MSM_PM_SLEEP_TICK_LIMIT (0x6DDD000)

/* Suspend sleep time in slow-clock ticks; 0 means infinite. */
static uint32_t msm_pm_max_sleep_time;
585
586/*
587 * Convert time from nanoseconds to slow clock ticks, then cap it to the
588 * specified limit
589 */
590static int64_t msm_pm_convert_and_cap_time(int64_t time_ns, int64_t limit)
591{
592 do_div(time_ns, NSEC_PER_SEC / SCLK_HZ);
593 return (time_ns > limit) ? limit : time_ns;
594}
595
596/*
597 * Set the sleep time for suspend. 0 means infinite sleep time.
598 */
599void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns)
600{
601 if (max_sleep_time_ns == 0) {
602 msm_pm_max_sleep_time = 0;
603 } else {
604 msm_pm_max_sleep_time = (uint32_t)msm_pm_convert_and_cap_time(
605 max_sleep_time_ns, MSM_PM_SLEEP_TICK_LIMIT);
606
607 if (msm_pm_max_sleep_time == 0)
608 msm_pm_max_sleep_time = 1;
609 }
610
611 if (msm_pm_debug_mask & MSM_PM_DEBUG_SUSPEND)
612 pr_info("%s: Requested %lld ns Giving %u sclk ticks\n",
613 __func__, max_sleep_time_ns, msm_pm_max_sleep_time);
614}
615EXPORT_SYMBOL(msm_pm_set_max_sleep_time);
616
617
618/******************************************************************************
619 *
620 *****************************************************************************/
621
/* Per-CPU power-management bookkeeping. */
struct msm_pm_device {
	unsigned int cpu;
#ifdef CONFIG_HOTPLUG_CPU
	struct completion cpu_killed;	/* completed in platform_cpu_die() */
	unsigned int warm_boot;		/* set after the CPU's first boot */
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_pm_device, msm_pm_devices);
/* Limits selected by msm_pm_idle_prepare() for the next idle collapse. */
static struct msm_rpmrs_limits *msm_pm_idle_rs_limits;
632
/* Enter "wait for interrupt" — the shallowest sleep, no state loss. */
static void msm_pm_swfi(void)
{
	msm_pm_config_hw_before_swfi();
	msm_arch_idle();
}
638
Maheshkumar Sivasubramaniandd93ecf2011-09-15 19:39:14 -0600639static bool msm_pm_spm_power_collapse(
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700640 struct msm_pm_device *dev, bool from_idle, bool notify_rpm)
641{
642 void *entry;
Maheshkumar Sivasubramaniandd93ecf2011-09-15 19:39:14 -0600643 bool collapsed = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700644 int ret;
645
646 if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
647 pr_info("CPU%u: %s: notify_rpm %d\n",
648 dev->cpu, __func__, (int) notify_rpm);
649
650 ret = msm_spm_set_low_power_mode(
651 MSM_SPM_MODE_POWER_COLLAPSE, notify_rpm);
652 WARN_ON(ret);
653
654 entry = (!dev->cpu || from_idle) ?
655 msm_pm_collapse_exit : msm_secondary_startup;
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -0600656 msm_pm_boot_config_before_pc(dev->cpu, virt_to_phys(entry));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700657
658 if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
659 pr_info("CPU%u: %s: program vector to %p\n",
660 dev->cpu, __func__, entry);
661
662#ifdef CONFIG_VFP
663 vfp_flush_context();
664#endif
665
666 collapsed = msm_pm_collapse();
667
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -0600668 msm_pm_boot_config_after_pc(dev->cpu);
669
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700670 if (collapsed) {
671#ifdef CONFIG_VFP
672 vfp_reinit();
673#endif
674 cpu_init();
675 writel(0xF0, MSM_QGIC_CPU_BASE + GIC_CPU_PRIMASK);
676 writel(1, MSM_QGIC_CPU_BASE + GIC_CPU_CTRL);
677 local_fiq_enable();
678 }
679
680 if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
681 pr_info("CPU%u: %s: msm_pm_collapse returned, collapsed %d\n",
682 dev->cpu, __func__, collapsed);
683
684 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
685 WARN_ON(ret);
Maheshkumar Sivasubramaniandd93ecf2011-09-15 19:39:14 -0600686 return collapsed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700687}
688
Maheshkumar Sivasubramaniandd93ecf2011-09-15 19:39:14 -0600689static bool msm_pm_power_collapse_standalone(bool from_idle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700690{
691 struct msm_pm_device *dev = &__get_cpu_var(msm_pm_devices);
692 unsigned int avsdscr_setting;
Maheshkumar Sivasubramaniandd93ecf2011-09-15 19:39:14 -0600693 bool collapsed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700694
695 avsdscr_setting = avs_get_avsdscr();
696 avs_disable();
Maheshkumar Sivasubramaniandd93ecf2011-09-15 19:39:14 -0600697 collapsed = msm_pm_spm_power_collapse(dev, from_idle, false);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700698 avs_reset_delays(avsdscr_setting);
Maheshkumar Sivasubramaniandd93ecf2011-09-15 19:39:14 -0600699 return collapsed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700700}
701
/*
 * Full power collapse with RPM notification: ramp the CPU clock down,
 * collapse via the SPM, then restore the clock and AVS settings.
 * Returns true if power was actually lost.
 */
static bool msm_pm_power_collapse(bool from_idle)
{
	struct msm_pm_device *dev = &__get_cpu_var(msm_pm_devices);
	unsigned long saved_acpuclk_rate;
	unsigned int avsdscr_setting;
	bool collapsed;

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: idle %d\n",
			dev->cpu, __func__, (int)from_idle);

	msm_pm_config_hw_before_power_down();
	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: pre power down\n", dev->cpu, __func__);

	/* AVS stays disabled across the collapse. */
	avsdscr_setting = avs_get_avsdscr();
	avs_disable();

	/*
	 * An offline (hot-unplugged) CPU saves no rate and restores with
	 * rate 0.  NOTE(review): confirm acpuclk_set_rate() tolerates a
	 * 0 rate on this target.
	 */
	if (cpu_online(dev->cpu))
		saved_acpuclk_rate = acpuclk_power_collapse();
	else
		saved_acpuclk_rate = 0;

	if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
		pr_info("CPU%u: %s: change clock rate (old rate = %lu)\n",
			dev->cpu, __func__, saved_acpuclk_rate);

	collapsed = msm_pm_spm_power_collapse(dev, from_idle, true);

	if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
		pr_info("CPU%u: %s: restore clock rate to %lu\n",
			dev->cpu, __func__, saved_acpuclk_rate);
	if (acpuclk_set_rate(dev->cpu, saved_acpuclk_rate, SETRATE_PC) < 0)
		pr_err("CPU%u: %s: failed to restore clock rate(%lu)\n",
			dev->cpu, __func__, saved_acpuclk_rate);

	avs_reset_delays(avsdscr_setting);
	msm_pm_config_hw_after_power_up();
	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: post power up\n", dev->cpu, __func__);

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: return\n", dev->cpu, __func__);
	return collapsed;
}
747
748static irqreturn_t msm_pm_rpm_wakeup_interrupt(int irq, void *dev_id)
749{
750 if (dev_id != &msm_pm_rpm_wakeup_interrupt)
751 return IRQ_NONE;
752
753 return IRQ_HANDLED;
754}
755
756
757/******************************************************************************
758 * External Idle/Suspend Functions
759 *****************************************************************************/
760
/*
 * Default arch idle hook: intentionally empty — the MSM low-power
 * modes are entered through the cpuidle hooks below instead.
 */
void arch_idle(void)
{
	return;
}
765
/*
 * cpuidle "prepare" hook: for each C-state on @dev decide whether it
 * may be entered right now.  A state is vetoed when its mode is
 * disabled/unsupported, when deep collapse is unsafe (other CPUs
 * online, idle wakelocks held, outstanding local RPM requests on
 * CPU0), or when no RPM resource limit fits the permitted latency and
 * expected sleep window.  Vetoed states get CPUIDLE_FLAG_IGNORE.
 * Always returns 0.
 */
int msm_pm_idle_prepare(struct cpuidle_device *dev)
{
	uint32_t latency_us;
	uint32_t sleep_us;
	int i;

	/* QoS-permitted wakeup latency and expected idle span, in us. */
	latency_us = (uint32_t) pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	sleep_us = (uint32_t) ktime_to_ns(tick_nohz_get_sleep_length());
	sleep_us = DIV_ROUND_UP(sleep_us, 1000);

	for (i = 0; i < dev->state_count; i++) {
		struct cpuidle_state *state = &dev->states[i];
		enum msm_pm_sleep_mode mode;
		bool allow;
		struct msm_rpmrs_limits *rs_limits = NULL;
		int idx;

		mode = (enum msm_pm_sleep_mode) state->driver_data;
		idx = MSM_PM_MODE(dev->cpu, mode);

		allow = msm_pm_modes[idx].idle_enabled &&
			msm_pm_modes[idx].idle_supported;

		/*
		 * Deeper modes apply their extra vetoes, then fall
		 * through so every still-allowed mode gets RPM resource
		 * limits looked up.
		 */
		switch (mode) {
		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
			if (!allow)
				break;

			if (num_online_cpus() > 1) {
				allow = false;
				break;
			}
#ifdef CONFIG_HAS_WAKELOCK
			if (has_wake_lock(WAKE_LOCK_IDLE)) {
				allow = false;
				break;
			}
#endif
			/* fall through */

		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
			if (!allow)
				break;

			if (!dev->cpu &&
				msm_rpm_local_request_is_outstanding()) {
				allow = false;
				break;
			}
			/* fall through */

		case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
			if (!allow)
				break;

			rs_limits = msm_rpmrs_lowest_limits(true,
				mode, latency_us, sleep_us);

			if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
				pr_info("CPU%u: %s: %s, latency %uus, "
					"sleep %uus, limit %p\n",
					dev->cpu, __func__, state->desc,
					latency_us, sleep_us, rs_limits);

			if ((MSM_PM_DEBUG_IDLE_LIMITS & msm_pm_debug_mask) &&
					rs_limits)
				pr_info("CPU%u: %s: limit %p: "
					"pxo %d, l2_cache %d, "
					"vdd_mem %d, vdd_dig %d\n",
					dev->cpu, __func__, rs_limits,
					rs_limits->pxo,
					rs_limits->l2_cache,
					rs_limits->vdd_mem,
					rs_limits->vdd_dig);

			if (!rs_limits)
				allow = false;
			break;

		default:
			allow = false;
			break;
		}

		if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
			pr_info("CPU%u: %s: allow %s: %d\n",
				dev->cpu, __func__, state->desc, (int)allow);

		if (allow) {
			/* allow implies rs_limits != NULL at this point. */
			state->flags &= ~CPUIDLE_FLAG_IGNORE;
			state->target_residency = 0;
			state->exit_latency = 0;
			state->power_usage = rs_limits->power[dev->cpu];

			/* Remember limits for the actual collapse later. */
			if (MSM_PM_SLEEP_MODE_POWER_COLLAPSE == mode)
				msm_pm_idle_rs_limits = rs_limits;
		} else {
			state->flags |= CPUIDLE_FLAG_IGNORE;
		}
	}

	return 0;
}
869
/*
 * cpuidle "enter" hook: execute @sleep_mode on this CPU and return the
 * time spent asleep, in microseconds (0 for an unrecognized mode).
 */
int msm_pm_idle_enter(enum msm_pm_sleep_mode sleep_mode)
{
	int64_t time;
#ifdef CONFIG_MSM_IDLE_STATS
	int exit_stat;
#endif

	if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: mode %d\n",
			smp_processor_id(), __func__, sleep_mode);

	time = ktime_to_ns(ktime_get());

	switch (sleep_mode) {
	case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
		msm_pm_swfi();
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_WFI;
#endif
		break;

	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
		msm_pm_power_collapse_standalone(true);
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
#endif
		break;

	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE: {
		int64_t timer_expiration = msm_timer_enter_idle();
		bool timer_halted = false;
		uint32_t sleep_delay;
		int ret;
		int notify_rpm =
			(sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE);
		int collapsed;

		/* Convert the next timer expiry to capped sclk ticks. */
		sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
			timer_expiration, MSM_PM_SLEEP_TICK_LIMIT);
		if (sleep_delay == 0) /* 0 would mean infinite time */
			sleep_delay = 1;

		/* Collapse only if the RPM accepts the sleep request. */
		ret = msm_rpmrs_enter_sleep(
			sleep_delay, msm_pm_idle_rs_limits, true, notify_rpm);
		if (!ret) {
			collapsed = msm_pm_power_collapse(true);
			timer_halted = true;

			msm_rpmrs_exit_sleep(msm_pm_idle_rs_limits, true,
				notify_rpm, collapsed);
		}

		msm_timer_exit_idle((int) timer_halted);
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
#endif
		break;
	}

	default:
		__WARN();
		goto cpuidle_enter_bail;
	}

	time = ktime_to_ns(ktime_get()) - time;
#ifdef CONFIG_MSM_IDLE_STATS
	msm_pm_add_stat(exit_stat, time);
#endif

	/* ns -> us for the cpuidle core. */
	do_div(time, 1000);
	return (int) time;

cpuidle_enter_bail:
	return 0;
}
945
/*
 * suspend "enter" callback.  Must run on CPU0 (warns and bails
 * otherwise).  Picks the deepest suspend mode enabled for CPU0 —
 * power collapse, standalone power collapse, or plain SWFI — and
 * executes it, optionally recording the suspend duration.
 * Always returns 0.
 */
static int msm_pm_enter(suspend_state_t state)
{
	bool allow[MSM_PM_SLEEP_MODE_NR];
	int i;

#ifdef CONFIG_MSM_IDLE_STATS
	int64_t period = 0;
	int64_t time = msm_timer_get_sclk_time(&period);
#endif

	if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
		pr_info("%s\n", __func__);

	if (smp_processor_id()) {
		__WARN();
		goto enter_exit;
	}


	/* Collect the per-mode suspend enable flags for CPU0. */
	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct msm_pm_platform_data *mode;

		mode = &msm_pm_modes[MSM_PM_MODE(0, i)];
		allow[i] = mode->suspend_supported && mode->suspend_enabled;
	}

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE]) {
		struct msm_rpmrs_limits *rs_limits;
		int ret;

		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: power collapse\n", __func__);

		clock_debug_print_enabled();

#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
		/* One-shot debug override of the sleep duration. */
		if (msm_pm_sleep_time_override > 0) {
			int64_t ns = NSEC_PER_SEC *
				(int64_t) msm_pm_sleep_time_override;
			msm_pm_set_max_sleep_time(ns);
			msm_pm_sleep_time_override = 0;
		}
#endif /* CONFIG_MSM_SLEEP_TIME_OVERRIDE */

		if (MSM_PM_DEBUG_SUSPEND_LIMITS & msm_pm_debug_mask)
			msm_rpmrs_show_resources();

		/*
		 * (-1, -1) latency/sleep arguments — presumably "no
		 * constraint"; verify against msm_rpmrs_lowest_limits().
		 */
		rs_limits = msm_rpmrs_lowest_limits(false,
			MSM_PM_SLEEP_MODE_POWER_COLLAPSE, -1, -1);

		if ((MSM_PM_DEBUG_SUSPEND_LIMITS & msm_pm_debug_mask) &&
				rs_limits)
			pr_info("%s: limit %p: pxo %d, l2_cache %d, "
				"vdd_mem %d, vdd_dig %d\n",
				__func__, rs_limits,
				rs_limits->pxo, rs_limits->l2_cache,
				rs_limits->vdd_mem, rs_limits->vdd_dig);

		if (rs_limits) {
			ret = msm_rpmrs_enter_sleep(
				msm_pm_max_sleep_time, rs_limits, false, true);
			if (!ret) {
				int collapsed = msm_pm_power_collapse(false);
				msm_rpmrs_exit_sleep(rs_limits, false, true,
						collapsed);
			}
		} else {
			pr_err("%s: cannot find the lowest power limit\n",
				__func__);
		}

#ifdef CONFIG_MSM_IDLE_STATS
		/*
		 * Suspend duration via the slow clock, compensating for
		 * one wraparound of its period.
		 */
		if (time != 0) {
			int64_t end_time = msm_timer_get_sclk_time(NULL);
			if (end_time != 0) {
				time = end_time - time;
				if (time < 0)
					time += period;
			} else
				time = 0;
		}

		msm_pm_add_stat(MSM_PM_STAT_SUSPEND, time);
#endif /* CONFIG_MSM_IDLE_STATS */
	} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: standalone power collapse\n", __func__);
		msm_pm_power_collapse_standalone(false);
	} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: swfi\n", __func__);
		msm_pm_swfi();
	}


enter_exit:
	if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
		pr_info("%s: return\n", __func__);

	return 0;
}
1047
/* Suspend callbacks; only the "mem" suspend state is valid. */
static struct platform_suspend_ops msm_pm_ops = {
	.enter = msm_pm_enter,
	.valid = suspend_valid_only_mem,
};
1052
1053#ifdef CONFIG_HOTPLUG_CPU
/*
 * Hotplug policy: any CPU except CPU0 may be taken offline.
 * Returns 0 if @cpu may be disabled, -EPERM for CPU0.
 */
int platform_cpu_disable(unsigned int cpu)
{
	if (cpu == 0)
		return -EPERM;
	return 0;
}
1058
/*
 * Wait up to 5 seconds for the dying CPU to signal cpu_killed
 * (completed in platform_cpu_die()).  Returns nonzero on success,
 * 0 on timeout.
 */
int platform_cpu_kill(unsigned int cpu)
{
	struct completion *killed = &per_cpu(msm_pm_devices, cpu).cpu_killed;
	return wait_for_completion_timeout(killed, HZ * 5);
}
1064
/*
 * Low-power holding loop for a hot-unplugged CPU.  Signals cpu_killed
 * for platform_cpu_kill(), then repeatedly enters the deepest allowed
 * sleep mode until pen_release indicates a genuine hotplug wakeup.
 * Must run on the CPU being shut down.
 */
void platform_cpu_die(unsigned int cpu)
{
	bool allow[MSM_PM_SLEEP_MODE_NR];
	int i;

	if (unlikely(cpu != smp_processor_id())) {
		pr_crit("%s: running on %u, should be %u\n",
			__func__, smp_processor_id(), cpu);
		BUG();
	}

	/* Hotplug sleep reuses the suspend enable/support flags. */
	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct msm_pm_platform_data *mode;

		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];
		allow[i] = mode->suspend_supported && mode->suspend_enabled;
	}

	if (MSM_PM_DEBUG_HOTPLUG & msm_pm_debug_mask)
		pr_notice("CPU%u: %s: shutting down cpu\n", cpu, __func__);
	complete(&__get_cpu_var(msm_pm_devices).cpu_killed);

	flush_cache_all();

	for (;;) {
		if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
			msm_pm_power_collapse(false);
		else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE])
			msm_pm_power_collapse_standalone(false);
		else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT])
			msm_pm_swfi();

		if (pen_release == cpu) {
			/* OK, proper wakeup, we're done */
			break;
		}
	}

	pen_release = -1;
	pr_notice("CPU%u: %s: normal wakeup\n", cpu, __func__);
}
1106
1107int msm_pm_platform_secondary_init(unsigned int cpu)
1108{
1109 int ret;
1110 struct msm_pm_device *dev = &__get_cpu_var(msm_pm_devices);
1111
1112 if (!dev->warm_boot) {
1113 dev->warm_boot = 1;
1114 return 0;
1115 }
Pratik Patele5771792011-09-17 18:33:54 -07001116 etm_restore_reg_check();
Pratik Patelfd6f56a2011-10-10 17:47:55 -07001117 msm_restore_jtag_debug();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001118#ifdef CONFIG_VFP
1119 vfp_reinit();
1120#endif
1121 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
1122
1123 return ret;
1124}
1125#endif /* CONFIG_HOTPLUG_CPU */
1126
1127/******************************************************************************
1128 * Initialization routine
1129 *****************************************************************************/
1130
1131static int __init msm_pm_init(void)
1132{
1133 pgd_t *pc_pgd;
1134 pmd_t *pmd;
1135 unsigned long pmdval;
1136 unsigned int cpu;
1137#ifdef CONFIG_MSM_IDLE_STATS
1138 struct proc_dir_entry *d_entry;
1139#endif
1140 int ret;
1141
1142 /* Page table for cores to come back up safely. */
1143 pc_pgd = pgd_alloc(&init_mm);
1144 if (!pc_pgd)
1145 return -ENOMEM;
1146
1147 pmd = pmd_offset(pc_pgd +
1148 pgd_index(virt_to_phys(msm_pm_collapse_exit)),
1149 virt_to_phys(msm_pm_collapse_exit));
1150 pmdval = (virt_to_phys(msm_pm_collapse_exit) & PGDIR_MASK) |
1151 PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
1152 pmd[0] = __pmd(pmdval);
1153 pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
1154
1155 /* It is remotely possible that the code in msm_pm_collapse_exit()
1156 * which turns on the MMU with this mapping is in the
1157 * next even-numbered megabyte beyond the
1158 * start of msm_pm_collapse_exit().
1159 * Map this megabyte in as well.
1160 */
1161 pmd[2] = __pmd(pmdval + (2 << (PGDIR_SHIFT - 1)));
1162 flush_pmd_entry(pmd);
1163 msm_pm_pc_pgd = virt_to_phys(pc_pgd);
1164
1165 ret = request_irq(rpm_cpu0_wakeup_irq,
1166 msm_pm_rpm_wakeup_interrupt, IRQF_TRIGGER_RISING,
1167 "pm_drv", msm_pm_rpm_wakeup_interrupt);
1168 if (ret) {
1169 pr_err("%s: failed to request irq %u: %d\n",
1170 __func__, rpm_cpu0_wakeup_irq, ret);
1171 return ret;
1172 }
1173
1174 ret = irq_set_irq_wake(rpm_cpu0_wakeup_irq, 1);
1175 if (ret) {
1176 pr_err("%s: failed to set wakeup irq %u: %d\n",
1177 __func__, rpm_cpu0_wakeup_irq, ret);
1178 return ret;
1179 }
1180
1181 for_each_possible_cpu(cpu) {
1182 struct msm_pm_device *dev = &per_cpu(msm_pm_devices, cpu);
1183
1184 dev->cpu = cpu;
1185#ifdef CONFIG_HOTPLUG_CPU
1186 init_completion(&dev->cpu_killed);
1187#endif
1188 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001189
1190#ifdef CONFIG_MSM_IDLE_STATS
1191 for_each_possible_cpu(cpu) {
1192 struct msm_pm_time_stats *stats =
1193 per_cpu(msm_pm_stats, cpu).stats;
1194
1195 stats[MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request";
1196 stats[MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
1197 CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
1198
1199 stats[MSM_PM_STAT_IDLE_WFI].name = "idle-wfi";
1200 stats[MSM_PM_STAT_IDLE_WFI].first_bucket_time =
1201 CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
1202
1203 stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].name =
1204 "idle-standalone-power-collapse";
1205 stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].
1206 first_bucket_time = CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
1207
1208 stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].name =
1209 "idle-power-collapse";
1210 stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
1211 CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
1212
1213 stats[MSM_PM_STAT_SUSPEND].name = "suspend";
1214 stats[MSM_PM_STAT_SUSPEND].first_bucket_time =
1215 CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
1216 }
1217
1218 d_entry = create_proc_entry("msm_pm_stats",
1219 S_IRUGO | S_IWUSR | S_IWGRP, NULL);
1220 if (d_entry) {
1221 d_entry->read_proc = msm_pm_read_proc;
1222 d_entry->write_proc = msm_pm_write_proc;
1223 d_entry->data = NULL;
1224 }
1225#endif /* CONFIG_MSM_IDLE_STATS */
1226
1227 msm_pm_mode_sysfs_add();
1228 msm_spm_allow_x_cpu_set_vdd(false);
1229
1230 suspend_set_ops(&msm_pm_ops);
1231 msm_cpuidle_init();
1232
1233 return 0;
1234}
1235
/* Register msm_pm_init() to run during the late initcall phase of boot. */
late_initcall(msm_pm_init);