/*
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/vfp.h>

#include "qdss.h"
#include "spm.h"
#include "pm.h"

extern volatile int pen_release;

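/*
 * Per-CPU hotplug bookkeeping: cpu_killed is completed by the dying CPU in
 * platform_cpu_die() and waited on in platform_cpu_kill(); warm_boot marks
 * whether this CPU has already been through its first (cold) boot.
 */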
struct msm_hotplug_device {
	struct completion cpu_killed;
	unsigned int warm_boot;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_hotplug_device,
		msm_hotplug_devices);

static inline void cpu_enter_lowpower(void)
{
	/* Just flush the cache. Changing the coherency is not yet
	 * available on msm. */
	flush_cache_all();
}

static inline void cpu_leave_lowpower(void)
{
	/* Nothing to do: coherency was left untouched in cpu_enter_lowpower(). */
}

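/*
 * Park the dying CPU in the platform low-power path; it only returns once
 * pen_release matches this CPU, i.e. it has been asked to come back online.
 */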
static inline void platform_do_lowpower(unsigned int cpu)
{
	/* Just enter wfi for now. TODO: Properly shut off the cpu. */
	for (;;) {

		msm_pm_cpu_enter_lowpower(cpu);
		if (pen_release == cpu) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here means that we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * The trouble is, letting people know about this is not really
		 * possible, since we are currently running incoherently, and
		 * therefore cannot safely call printk() or anything else.
		 */
		pr_debug("CPU%u: spurious wakeup call\n", cpu);
	}
}

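/*
 * Wait (up to five seconds) for the dying CPU to signal cpu_killed from
 * platform_cpu_die(); returns nonzero if it checked in before the timeout.
 */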
int platform_cpu_kill(unsigned int cpu)
{
	struct completion *killed =
		&per_cpu(msm_hotplug_devices, cpu).cpu_killed;

	return wait_for_completion_timeout(killed, HZ * 5);
}

/*
 * platform-specific code to shut down a CPU
 *
 * Called with IRQs disabled
 */
void platform_cpu_die(unsigned int cpu)
{
	if (unlikely(cpu != smp_processor_id())) {
		pr_crit("%s: running on %u, should be %u\n",
			__func__, smp_processor_id(), cpu);
		BUG();
	}
	complete(&__get_cpu_var(msm_hotplug_devices).cpu_killed);
	/*
	 * we're ready for shutdown now, so do it
	 */
	cpu_enter_lowpower();
	platform_do_lowpower(cpu);

	pr_notice("CPU%u: %s: normal wakeup\n", cpu, __func__);
	cpu_leave_lowpower();
}

int platform_cpu_disable(unsigned int cpu)
{
	/*
	 * we don't allow CPU 0 to be shut down (it is still too special,
	 * e.g. clock tick interrupts)
	 */
	return cpu == 0 ? -EPERM : 0;
}

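/*
 * Per-CPU init on secondary bring-up: the first (cold) boot just marks the
 * CPU as warm-booted; later warm boots restore ETM/JTAG debug state,
 * reinitialise VFP and return the SPM to clock-gating mode.
 */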
int msm_platform_secondary_init(unsigned int cpu)
{
	int ret;
	struct msm_hotplug_device *dev = &__get_cpu_var(msm_hotplug_devices);

	if (!dev->warm_boot) {
		dev->warm_boot = 1;
		init_completion(&dev->cpu_killed);
		return 0;
	}
	etm_restore_reg_check();
	msm_restore_jtag_debug();
#ifdef CONFIG_VFP
	vfp_reinit();
#endif
	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);

	return ret;
}