/*
 * Idle processing for ARMv7-based Qualcomm SoCs.
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2009, 2011 Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/assembler.h>

#ifdef CONFIG_MSM_CPU_AVS
/* 11 general purpose registers (r4-r14), 10 cp15 registers, 3 AVS registers */
#define CPU_SAVED_STATE_SIZE (4 * 11 + 4 * 10 + 4 * 3)
#else
/* 11 general purpose registers (r4-r14), 10 cp15 registers */
#define CPU_SAVED_STATE_SIZE (4 * 11 + 4 * 10)
#endif

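/*
 * msm_arch_idle: simple "wait for interrupt" idle.  When the options are
 * enabled, JTAG debug and ETM state are saved before the WFI and restored
 * afterwards; otherwise this is just a WFI and a return to the caller.
 */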
ENTRY(msm_arch_idle)
        stmfd   sp!, {lr}
#ifdef CONFIG_MSM_JTAG_V7
        bl      msm_save_jtag_debug
#endif
#ifdef CONFIG_MSM_ETM
        bl      etm_save_reg_check
#endif
        wfi
#ifdef CONFIG_MSM_ETM
        bl      etm_restore_reg_check
#endif
#ifdef CONFIG_MSM_JTAG_V7
        bl      msm_restore_jtag_debug
#endif
        ldmfd   sp!, {lr}
        bx      lr

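/*
 * msm_pm_collapse: prepare this CPU for power collapse.  r4-r14 and the
 * relevant cp15 registers are written to this CPU's slot in saved_state,
 * the D-cache is cleaned, the caches are disabled and a WFI is issued.
 * If the collapse is aborted (the WFI falls through), the saved registers
 * are reloaded and 0 is returned.  If power is actually removed, the CPU
 * is expected to resume through the warm-boot path (msm_pm_boot_entry
 * dispatching to msm_pm_collapse_exit), which restores the saved state
 * and returns 1 to this function's caller.
 */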
ENTRY(msm_pm_collapse)
#if defined(CONFIG_MSM_FIQ_SUPPORT)
        cpsid   f
#endif

        ldr     r0, =saved_state
#if (NR_CPUS >= 2)
        mrc     p15, 0, r1, c0, c0, 5  /* MPIDR */
        ands    r1, r1, #15            /* What CPU am I */
        addne   r0, r0, #CPU_SAVED_STATE_SIZE
#endif

        stmia   r0!, {r4-r14}
        mrc     p15, 0, r1, c1, c0, 0  /* MMU control */
        mrc     p15, 0, r2, c2, c0, 0  /* TTBR0 */
        mrc     p15, 0, r3, c3, c0, 0  /* dacr */
#ifdef CONFIG_ARCH_MSM_SCORPION
        /* This instruction is not valid for non-Scorpion processors */
        mrc     p15, 3, r4, c15, c0, 3 /* L2CR1 is the L2 cache control reg 1 */
#endif
        mrc     p15, 0, r5, c10, c2, 0 /* PRRR */
        mrc     p15, 0, r6, c10, c2, 1 /* NMRR */
        mrc     p15, 0, r7, c1, c0, 1  /* ACTLR */
        mrc     p15, 0, r8, c2, c0, 1  /* TTBR1 */
        mrc     p15, 0, r9, c13, c0, 3 /* TPIDRURO */
        mrc     p15, 0, ip, c13, c0, 1 /* context ID */
        stmia   r0!, {r1-r9, ip}
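        /*
         * Note: the store order above defines the save-area layout and
         * must match the ldmdb restore sequence in msm_pm_collapse_exit
         * below.
         */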
#ifdef CONFIG_MSM_CPU_AVS
        mrc     p15, 7, r1, c15, c1, 7 /* AVSCSR is the Adaptive Voltage
                                        * Scaling Control and Status Register */
        mrc     p15, 7, r2, c15, c0, 6 /* AVSDSCR is the Adaptive Voltage
                                        * Scaling Delay Synthesizer Control
                                        * Register */
#ifndef CONFIG_ARCH_MSM_KRAIT
        mrc     p15, 7, r3, c15, c1, 0 /* TSCSR is the Temperature Status and
                                        * Control Register
                                        */
#endif

        stmia   r0!, {r1-r3}
#endif

#ifdef CONFIG_MSM_JTAG_V7
        bl      msm_save_jtag_debug
#endif
#ifdef CONFIG_MSM_ETM
        bl      etm_save_reg_check
#endif
        bl      v7_flush_dcache_all

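        /*
         * The D-cache has just been cleaned, so clear the C and I bits in
         * SCTLR before the WFI so the caches cannot pick up new lines ahead
         * of the power collapse.  The original SCTLR value is kept in r1 and
         * written back if the WFI falls through.
         */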
        mrc     p15, 0, r1, c1, c0, 0  /* read current CR */
        bic     r0, r1, #(1 << 2)      /* clear dcache bit */
        bic     r0, r0, #(1 << 12)     /* clear icache bit */
        mcr     p15, 0, r0, c1, c0, 0  /* disable d/i cache */

        dsb

        wfi

        mcr     p15, 0, r1, c1, c0, 0  /* restore d/i cache */
        isb

#if defined(CONFIG_MSM_FIQ_SUPPORT)
        cpsie   f
#endif
#ifdef CONFIG_MSM_ETM
        bl      etm_restore_reg_check
#endif
#ifdef CONFIG_MSM_JTAG_V7
        bl      msm_restore_jtag_debug
#endif
        ldr     r0, =saved_state       /* restore registers */
#if (NR_CPUS >= 2)
        mrc     p15, 0, r1, c0, c0, 5  /* MPIDR */
        ands    r1, r1, #15            /* What CPU am I */
        addne   r0, r0, #CPU_SAVED_STATE_SIZE
#endif

        ldmfd   r0, {r4-r14}
        mov     r0, #0                 /* power collapse failed; return 0 */
        bx      lr

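/*
 * msm_pm_collapse_exit: warm-boot resume path, entered with the MMU off
 * and running from physical addresses.  The physical address of the save
 * area is computed by rebasing the link-time symbol against the current
 * PC, the cp15 and general-purpose registers saved by msm_pm_collapse are
 * reloaded, and the MMU is re-enabled through a temporary TTBR0 taken
 * from msm_pm_pc_pgd before execution jumps back to the kernel's virtual
 * mapping.  Returns 1 (through the restored lr) to the caller of
 * msm_pm_collapse.
 */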
ENTRY(msm_pm_collapse_exit)
#if 0   /* serial debug */
        mov     r0, #0x80000016
        mcr     p15, 0, r0, c15, c2, 4
        mov     r0, #0xA9000000
        add     r0, r0, #0x00A00000    /* UART1 */
        /*add   r0, r0, #0x00C00000*/  /* UART3 */
        mov     r1, #'A'
        str     r1, [r0, #0x00C]
#endif
        ldr     r1, =saved_state_end
        ldr     r2, =msm_pm_collapse_exit
        adr     r3, msm_pm_collapse_exit
        add     r1, r1, r3
        sub     r1, r1, r2
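        /*
         * r1 now holds the physical address of saved_state_end: adr gives
         * the run-time (physical) address of msm_pm_collapse_exit while
         * ldr = gives its link-time virtual address, so adding the
         * difference rebases the virtual symbol into the address space
         * this code is currently executing from.
         */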
#if (NR_CPUS >= 2)
        mrc     p15, 0, r2, c0, c0, 5  /* MPIDR */
        ands    r2, r2, #15            /* What CPU am I */
        subeq   r1, r1, #CPU_SAVED_STATE_SIZE
#endif

#ifdef CONFIG_MSM_CPU_AVS
        ldmdb   r1!, {r2-r4}
#ifndef CONFIG_ARCH_MSM_KRAIT
        mcr     p15, 7, r4, c15, c1, 0 /* TSCSR */
#endif
        mcr     p15, 7, r3, c15, c0, 6 /* AVSDSCR */
        mcr     p15, 7, r2, c15, c1, 7 /* AVSCSR */
#endif
        ldmdb   r1!, {r2-r11}
        mcr     p15, 0, r4, c3, c0, 0  /* dacr */
        mcr     p15, 0, r3, c2, c0, 0  /* TTBR0 */
#ifdef CONFIG_ARCH_MSM_SCORPION
        /* This instruction is not valid for non-Scorpion processors */
        mcr     p15, 3, r5, c15, c0, 3 /* L2CR1 */
#endif
        mcr     p15, 0, r6, c10, c2, 0 /* PRRR */
        mcr     p15, 0, r7, c10, c2, 1 /* NMRR */
        mcr     p15, 0, r8, c1, c0, 1  /* ACTLR */
        mcr     p15, 0, r9, c2, c0, 1  /* TTBR1 */
        mcr     p15, 0, r10, c13, c0, 3 /* TPIDRURO */
        mcr     p15, 0, r11, c13, c0, 1 /* context ID */
        isb
        ldmdb   r1!, {r4-r14}
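        /*
         * Switch to the page table whose physical address the platform PM
         * code is expected to have stored in msm_pm_pc_pgd (a table that
         * maps this code so it can run with the MMU on), keeping the flag
         * bits of the current TTBR0.  The original TTBR0 is restored once
         * execution reaches the virtual mapping at msm_pm_pa_to_va.
         */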
        ldr     r0, =msm_pm_pc_pgd
        ldr     r1, =msm_pm_collapse_exit
        adr     r3, msm_pm_collapse_exit
        add     r0, r0, r3
        sub     r0, r0, r1
        ldr     r0, [r0]
        mrc     p15, 0, r1, c2, c0, 0  /* save current TTBR0 */
        and     r3, r1, #0x7f          /* mask to get TTB flags */
        orr     r0, r0, r3             /* add TTB flags to switch TTBR value */
        mcr     p15, 0, r0, c2, c0, 0  /* temporary switch TTBR0 */
        isb
        mcr     p15, 0, r2, c1, c0, 0  /* MMU control */
        isb
msm_pm_mapped_pa:
        /* Switch to virtual */
        ldr     r0, =msm_pm_pa_to_va
        mov     pc, r0
msm_pm_pa_to_va:
        mcr     p15, 0, r1, c2, c0, 0  /* restore TTBR0 */
        isb
        mcr     p15, 0, r3, c8, c7, 0  /* UTLBIALL */
        mcr     p15, 0, r3, c7, c5, 6  /* BPIALL */
        dsb

        isb
        stmfd   sp!, {lr}
        bl      v7_flush_kern_cache_all
#ifdef CONFIG_MSM_ETM
        bl      etm_restore_reg_check
#endif
#ifdef CONFIG_MSM_JTAG_V7
        bl      msm_restore_jtag_debug
#endif
        ldmfd   sp!, {lr}
        mov     r0, #1
        bx      lr
        nop
        nop
        nop
        nop
        nop
1:      b       1b

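/*
 * msm_pm_boot_entry: warm-boot entry point, executed with the MMU off.
 * Reads this CPU's number from MPIDR, translates the address of
 * msm_pm_boot_vector into a physical address, and jumps to whatever entry
 * point was stored there for this CPU by msm_pm_write_boot_vector.
 */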
ENTRY(msm_pm_boot_entry)
        mrc     p15, 0, r0, c0, c0, 5  /* MPIDR */
        and     r0, r0, #15            /* what CPU am I */

        ldr     r1, =msm_pm_boot_vector
        ldr     r2, =msm_pm_boot_entry
        adr     r3, msm_pm_boot_entry
        add     r1, r1, r3             /* translate virt to phys addr */
        sub     r1, r1, r2

        add     r1, r1, r0, LSL #2     /* locate boot vector for our cpu */
        ldr     pc, [r1]               /* jump */

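/*
 * msm_pm_write_boot_vector: store the entry address passed in r1 into the
 * boot-vector slot selected by the CPU number passed in r0.
 */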
ENTRY(msm_pm_write_boot_vector)
        ldr     r2, =msm_pm_boot_vector
        add     r2, r2, r0, LSL #2     /* locate boot vector for our cpu */
        str     r1, [r2]
        bx      lr

        .data

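/*
 * Physical address of the page table used for the temporary TTBR0 switch
 * in msm_pm_collapse_exit; expected to be filled in at run time by the
 * platform power-management code.
 */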
        .globl msm_pm_pc_pgd
msm_pm_pc_pgd:
        .long   0x0

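/*
 * Per-CPU register save area: one CPU_SAVED_STATE_SIZE block per core,
 * laid out by the stmia sequence in msm_pm_collapse.
 */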
saved_state:
#if (NR_CPUS >= 2)
        .space  CPU_SAVED_STATE_SIZE * 2 /* This code only supports 2 cores */
#else
        .space  CPU_SAVED_STATE_SIZE
#endif
saved_state_end:

msm_pm_boot_vector:
        .space  4 * NR_CPUS