/*
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/vfp.h>

#include "pm.h"
#include "qdss.h"
#include "spm.h"

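/*
 * pen_release is the SMP boot "holding pen" flag shared with the
 * platform SMP boot code (platsmp.c).  The CPU bringing a core back
 * online writes that core's number here; a hot-unplugged CPU only
 * leaves the low-power loop below once pen_release matches its own
 * CPU number again.
 */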
extern volatile int pen_release;

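/*
 * Per-CPU hotplug state: cpu_killed is completed by the dying CPU in
 * platform_cpu_die() and waited on in platform_cpu_kill(); warm_boot
 * distinguishes a secondary CPU's first (cold) boot from later hotplug
 * re-entries in msm_platform_secondary_init().
 */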
struct msm_hotplug_device {
	struct completion cpu_killed;
	unsigned int warm_boot;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_hotplug_device,
			msm_hotplug_devices);

static inline void cpu_enter_lowpower(void)
{
	/*
	 * Just flush the cache. Changing the coherency is not yet
	 * available on msm.
	 */
	flush_cache_all();
}

static inline void cpu_leave_lowpower(void)
{
}

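/*
 * Park the dying CPU in its platform low-power state.  A wakeup only
 * counts as a genuine hotplug exit when the boot code has written this
 * CPU's number into pen_release; anything else is treated as a spurious
 * wakeup and the CPU is put back to sleep.
 */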
static inline void platform_do_lowpower(unsigned int cpu)
{
	/* Just enter wfi for now. TODO: Properly shut off the cpu. */
	for (;;) {

		msm_pm_cpu_enter_lowpower(cpu);
		if (pen_release == cpu) {
			/*
			 * OK, proper wakeup, we're done
			 */
			pen_release = -1;
			dmac_flush_range((void *)&pen_release,
					 (void *)(&pen_release + 1));
			break;
		}

		/*
		 * Getting here means that we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * The trouble is, letting people know about this is not really
		 * possible, since we are currently running incoherently, and
		 * therefore cannot safely call printk() or anything else.
		 */
		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		pr_debug("CPU%u: spurious wakeup call\n", cpu);
	}
}
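/*
 * Called on a still-running CPU to confirm that the CPU being unplugged
 * has really gone down: first wait (up to five seconds) for the dying
 * CPU to signal cpu_killed from platform_cpu_die(), then fall back to
 * msm_pm_wait_cpu_shutdown() to check that the core has shut down.
 */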
int platform_cpu_kill(unsigned int cpu)
{
	struct completion *killed =
		&per_cpu(msm_hotplug_devices, cpu).cpu_killed;
	int ret;

	ret = wait_for_completion_timeout(killed, HZ * 5);
	if (ret)
		return ret;

	return msm_pm_wait_cpu_shutdown(cpu);
}

/*
 * platform-specific code to shut down a CPU
 *
 * Called with IRQs disabled
 */
void platform_cpu_die(unsigned int cpu)
{
	if (unlikely(cpu != smp_processor_id())) {
		pr_crit("%s: running on %u, should be %u\n",
			__func__, smp_processor_id(), cpu);
		BUG();
	}
	complete(&__get_cpu_var(msm_hotplug_devices).cpu_killed);
	/*
	 * we're ready for shutdown now, so do it
	 */
	cpu_enter_lowpower();
	platform_do_lowpower(cpu);

	pr_notice("CPU%u: %s: normal wakeup\n", cpu, __func__);
	cpu_leave_lowpower();
}

int platform_cpu_disable(unsigned int cpu)
{
	/*
	 * we don't allow CPU 0 to be shut down (it is still too special,
	 * e.g. clock tick interrupts)
	 */
	return cpu == 0 ? -EPERM : 0;
}
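/*
 * Secondary CPU bring-up hook.  On a core's first (cold) boot only the
 * cpu_killed completion needs to be initialised; on warm boots after a
 * hotplug cycle the JTAG/debug state and VFP are restored and the SPM
 * is switched back to its clock-gating low-power mode.
 */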
int msm_platform_secondary_init(unsigned int cpu)
{
	int ret;
	struct msm_hotplug_device *dev = &__get_cpu_var(msm_hotplug_devices);

	if (!dev->warm_boot) {
		dev->warm_boot = 1;
		init_completion(&dev->cpu_killed);
		return 0;
	}
	msm_jtag_restore_state();
#ifdef CONFIG_VFP
	vfp_reinit();
#endif
	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);

	return ret;
}