/*
 * Idle processing for ARMv7-based Qualcomm SoCs.
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2009, 2011-2012 Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/assembler.h>

#ifdef CONFIG_MSM_CPU_AVS
/* 11 general purpose registers (r4-r14), 10 cp15 registers, 3 AVS registers */
#define CPU_SAVED_STATE_SIZE (4 * 11 + 4 * 10 + 4 * 3)
#else
/* 11 general purpose registers (r4-r14), 10 cp15 registers */
#define CPU_SAVED_STATE_SIZE (4 * 11 + 4 * 10)
#endif
#ifdef CONFIG_ARCH_MSM_KRAIT
#define SCM_SVC_BOOT 0x1
#define SCM_CMD_TERMINATE_PC 0x2
#endif

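/*
 * msm_arch_idle: simple idle without any state saving.  Execute WFI and
 * return as soon as the core wakes.  On MSM8x60 the ETM PDSR and debug
 * PRSR are read afterwards to clear their sticky power-down bits.
 */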
ENTRY(msm_arch_idle)
	wfi
#ifdef CONFIG_ARCH_MSM8X60
	mrc	p14, 1, r1, c1, c5, 4	/* read ETM PDSR to clear sticky bit */
	mrc	p14, 0, r1, c1, c5, 4	/* read DBG PRSR to clear sticky bit */
	isb
#endif
	bx	lr

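/*
 * msm_pm_collapse: save the CPU context (r4-r14 plus the cp15 and, when
 * CONFIG_MSM_CPU_AVS is set, AVS registers) into saved_state, flush the
 * caches as needed and enter power collapse.  If the collapse is aborted
 * and execution falls through, the saved context is reloaded and 0 is
 * returned; a successful collapse resumes in msm_pm_collapse_exit, which
 * returns 1 instead.
 */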
ENTRY(msm_pm_collapse)
#if defined(CONFIG_MSM_FIQ_SUPPORT)
	cpsid	f
#endif

	ldr	r0, =saved_state
#if (NR_CPUS >= 2)
	mrc	p15, 0, r1, c0, c0, 5	/* MPIDR */
	ands	r1, r1, #15		/* What CPU am I */
	addne	r0, r0, #CPU_SAVED_STATE_SIZE
#endif

	stmia	r0!, {r4-r14}
	mrc	p15, 0, r1, c1, c0, 0	/* MMU control */
	mrc	p15, 0, r2, c2, c0, 0	/* TTBR0 */
	mrc	p15, 0, r3, c3, c0, 0	/* DACR */
#ifdef CONFIG_ARCH_MSM_SCORPION
	/* This instruction is not valid on non-Scorpion processors */
	mrc	p15, 3, r4, c15, c0, 3	/* L2CR1 is the L2 cache control reg 1 */
#endif
	mrc	p15, 0, r5, c10, c2, 0	/* PRRR */
	mrc	p15, 0, r6, c10, c2, 1	/* NMRR */
	mrc	p15, 0, r7, c1, c0, 1	/* ACTLR */
	mrc	p15, 0, r8, c2, c0, 1	/* TTBR1 */
	mrc	p15, 0, r9, c13, c0, 3	/* TPIDRURO */
	mrc	p15, 0, ip, c13, c0, 1	/* context ID */
	stmia	r0!, {r1-r9, ip}
#ifdef CONFIG_MSM_CPU_AVS
	mrc	p15, 7, r1, c15, c1, 7	/* AVSCSR: Adaptive Voltage Scaling
					 * Control and Status Register */
	mrc	p15, 7, r2, c15, c0, 6	/* AVSDSCR: Adaptive Voltage Scaling
					 * Delay Synthesizer Control Register */
#ifndef CONFIG_ARCH_MSM_KRAIT
	mrc	p15, 7, r3, c15, c1, 0	/* TSCSR: Temperature Status and
					 * Control Register */
#endif

	stmia	r0!, {r1-r3}
#endif

#ifdef CONFIG_MSM_JTAG
	bl	msm_jtag_save_state
#endif

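	/*
	 * Decide how much cache to flush: if the L2 flush flag is set, or
	 * CCSIDR reports the level-1 data cache as write-back, flush the
	 * entire data cache; otherwise flush only the lines covering
	 * saved_state so the resume path sees the context with caches off.
	 */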
	ldr	r0, =msm_pm_flush_l2_flag
	ldr	r0, [r0]
	mov	r1, #0
	mcr	p15, 2, r1, c0, c0, 0	/* CSSELR: select level-1 data cache */
	isb
	mrc	p15, 1, r1, c0, c0, 0	/* CCSIDR */
	mov	r2, #1
	and	r1, r2, r1, ASR #30	/* check if the cache is write-back */
	orr	r1, r0, r1
	cmp	r1, #1
	bne	skip
	bl	v7_flush_dcache_all

skip:	ldr	r0, =saved_state
	ldr	r1, =saved_state_end
	sub	r1, r1, r0
	bl	v7_flush_kern_dcache_area

	mrc	p15, 0, r4, c1, c0, 0	/* read current CR */
	bic	r0, r4, #(1 << 2)	/* clear dcache bit */
	bic	r0, r0, #(1 << 12)	/* clear icache bit */
	mcr	p15, 0, r0, c1, c0, 0	/* disable d/i cache */

	dsb
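	/*
	 * On Krait, power collapse is entered through the secure monitor:
	 * scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC, l2_flush_flag).
	 * Other cores simply execute WFI.
	 */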
#ifdef CONFIG_ARCH_MSM_KRAIT
	ldr	r0, =SCM_SVC_BOOT
	ldr	r1, =SCM_CMD_TERMINATE_PC
	ldr	r2, =msm_pm_flush_l2_flag
	ldr	r2, [r2]
	bl	scm_call_atomic1
#else
	wfi
#endif
	mcr	p15, 0, r4, c1, c0, 0	/* restore d/i cache */
	isb

#if defined(CONFIG_MSM_FIQ_SUPPORT)
	cpsie	f
#endif
#ifdef CONFIG_MSM_JTAG
	bl	msm_jtag_restore_state
#endif
	ldr	r0, =saved_state	/* restore registers */
#if (NR_CPUS >= 2)
	mrc	p15, 0, r1, c0, c0, 5	/* MPIDR */
	ands	r1, r1, #15		/* What CPU am I */
	addne	r0, r0, #CPU_SAVED_STATE_SIZE
#endif

	ldmfd	r0, {r4-r14}
	mov	r0, #0			/* return power collapse failed */
	bx	lr

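/*
 * msm_pm_collapse_exit: warm-boot resume path after a successful power
 * collapse.  Entered with the MMU off at a physical address, so kernel
 * symbols are located by adding the delta between the physical and
 * virtual addresses of msm_pm_collapse_exit.  Restores the state saved
 * by msm_pm_collapse and returns 1 to its original caller.
 */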
ENTRY(msm_pm_collapse_exit)
#if 0 /* serial debug */
	mov	r0, #0x80000016
	mcr	p15, 0, r0, c15, c2, 4
	mov	r0, #0xA9000000
	add	r0, r0, #0x00A00000	/* UART1 */
	/*add	r0, r0, #0x00C00000*/	/* UART3 */
	mov	r1, #'A'
	str	r1, [r0, #0x00C]
#endif
	ldr	r1, =saved_state_end
	ldr	r2, =msm_pm_collapse_exit
	adr	r3, msm_pm_collapse_exit
	add	r1, r1, r3
	sub	r1, r1, r2
#if (NR_CPUS >= 2)
	mrc	p15, 0, r2, c0, c0, 5	/* MPIDR */
	ands	r2, r2, #15		/* What CPU am I */
	subeq	r1, r1, #CPU_SAVED_STATE_SIZE
#endif

#ifdef CONFIG_MSM_CPU_AVS
	ldmdb	r1!, {r2-r4}
#ifndef CONFIG_ARCH_MSM_KRAIT
	mcr	p15, 7, r4, c15, c1, 0	/* TSCSR */
#endif
	mcr	p15, 7, r3, c15, c0, 6	/* AVSDSCR */
	mcr	p15, 7, r2, c15, c1, 7	/* AVSCSR */
#endif
	ldmdb	r1!, {r2-r11}
	mcr	p15, 0, r4, c3, c0, 0	/* DACR */
	mcr	p15, 0, r3, c2, c0, 0	/* TTBR0 */
#ifdef CONFIG_ARCH_MSM_SCORPION
	/* This instruction is not valid on non-Scorpion processors */
	mcr	p15, 3, r5, c15, c0, 3	/* L2CR1 */
#endif
	mcr	p15, 0, r6, c10, c2, 0	/* PRRR */
	mcr	p15, 0, r7, c10, c2, 1	/* NMRR */
	mcr	p15, 0, r8, c1, c0, 1	/* ACTLR */
	mcr	p15, 0, r9, c2, c0, 1	/* TTBR1 */
	mcr	p15, 0, r10, c13, c0, 3	/* TPIDRURO */
	mcr	p15, 0, r11, c13, c0, 1	/* context ID */
	isb
	ldmdb	r1!, {r4-r14}
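	/*
	 * Switch from physical to virtual execution: install the page table
	 * pointed to by msm_pm_pc_pgd (set up by the platform PM code,
	 * presumably mapping this code one-to-one) into TTBR0 with the
	 * current TTB flags, enable the MMU, jump to the virtual alias of
	 * msm_pm_pa_to_va, then restore the original TTBR0 and invalidate
	 * the TLB and branch predictor.
	 */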
	ldr	r0, =msm_pm_pc_pgd
	ldr	r1, =msm_pm_collapse_exit
	adr	r3, msm_pm_collapse_exit
	add	r0, r0, r3
	sub	r0, r0, r1
	ldr	r0, [r0]
	mrc	p15, 0, r1, c2, c0, 0	/* save current TTBR0 */
	and	r3, r1, #0x7f		/* mask to get TTB flags */
	orr	r0, r0, r3		/* add TTB flags to switch TTBR value */
	mcr	p15, 0, r0, c2, c0, 0	/* temporarily switch TTBR0 */
	isb
	mcr	p15, 0, r2, c1, c0, 0	/* MMU control */
	isb
msm_pm_mapped_pa:
	/* Switch to virtual */
	ldr	r0, =msm_pm_pa_to_va
	mov	pc, r0
msm_pm_pa_to_va:
	mcr	p15, 0, r1, c2, c0, 0	/* restore TTBR0 */
	isb
	mcr	p15, 0, r3, c8, c7, 0	/* UTLBIALL */
	mcr	p15, 0, r3, c7, c5, 6	/* BPIALL */
	dsb
	isb
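	/*
	 * Krait only: if (MIDR & 0xff00fc00) == 0x51000400, i.e. a Qualcomm
	 * Krait part, clear bit 10 of the implementation-defined register at
	 * p15, 7, c15, c0, 2.  The meaning of that bit is Qualcomm-specific
	 * and not documented here.
	 */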
#ifdef CONFIG_ARCH_MSM_KRAIT
	mrc	p15, 0, r1, c0, c0, 0
	ldr	r3, =0xff00fc00
	and	r3, r1, r3
	ldr	r1, =0x51000400
	cmp	r3, r1
	mrceq	p15, 7, r3, c15, c0, 2
	biceq	r3, r3, #0x400
	mcreq	p15, 7, r3, c15, c0, 2
#endif
	stmfd	sp!, {lr}
	bl	v7_flush_kern_cache_all
#ifdef CONFIG_MSM_JTAG
	bl	msm_jtag_restore_state
#endif
	ldmfd	sp!, {lr}
	mov	r0, #1			/* return power collapse succeeded */
	bx	lr
	nop
	nop
	nop
	nop
	nop
1:	b	1b

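/*
 * msm_pm_boot_entry: common warm/secondary boot entry.  Runs with the
 * MMU off, so the address of msm_pm_boot_vector is translated to a
 * physical one before the per-CPU vector slot (indexed by the MPIDR
 * affinity level 0 field) is loaded into the PC.
 */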
ENTRY(msm_pm_boot_entry)
	mrc	p15, 0, r0, c0, c0, 5	/* MPIDR */
	and	r0, r0, #15		/* what CPU am I */

	ldr	r1, =msm_pm_boot_vector
	ldr	r2, =msm_pm_boot_entry
	adr	r3, msm_pm_boot_entry
	add	r1, r1, r3		/* translate virt to phys addr */
	sub	r1, r1, r2

	add	r1, r1, r0, LSL #2	/* locate boot vector for our cpu */
	ldr	pc, [r1]		/* jump */

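/*
 * msm_pm_write_boot_vector(cpu, address): store the entry address into
 * this CPU's slot of msm_pm_boot_vector and clean the 4-byte slot out of
 * the data cache so a waking core, which fetches it with caches disabled,
 * sees the new value.
 */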
ENTRY(msm_pm_write_boot_vector)
	ldr	r2, =msm_pm_boot_vector
	add	r2, r2, r0, LSL #2	/* locate boot vector for our cpu */
	str	r1, [r2]
	mov	r0, r2
	ldr	r1, =4
	stmfd	sp!, {lr}
	bl	v7_flush_kern_dcache_area
	ldmfd	sp!, {lr}
	bx	lr

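/*
 * msm_pm_set_l2_flush_flag(flag): record whether msm_pm_collapse must
 * flush the entire data cache before entering power collapse.
 */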
ENTRY(msm_pm_set_l2_flush_flag)
	ldr	r1, =msm_pm_flush_l2_flag
	str	r0, [r1]
	bx	lr

	.data

	.globl msm_pm_pc_pgd
msm_pm_pc_pgd:
	.long	0x0

saved_state:
#if (NR_CPUS >= 2)
	.space	CPU_SAVED_STATE_SIZE * 2 /* This code only supports 2 cores */
#else
	.space	CPU_SAVED_STATE_SIZE
#endif
saved_state_end:

msm_pm_boot_vector:
	.space	4 * NR_CPUS

/*
 * Default the L2 flush flag to 1 so that the caches are flushed during
 * power collapse, unless the L2 driver chooses to flush them only during
 * L2 power collapse.
 */
msm_pm_flush_l2_flag:
	.long	0x1