/*
 * Idle processing for ARMv7-based Qualcomm SoCs.
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2009, 2011 Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/assembler.h>

#ifdef CONFIG_MSM_CPU_AVS
/* 11 general purpose registers (r4-r14), 10 cp15 registers, 3 AVS registers */
#define CPU_SAVED_STATE_SIZE (4 * 11 + 4 * 10 + 4 * 3)
#else
/* 11 general purpose registers (r4-r14), 10 cp15 registers */
#define CPU_SAVED_STATE_SIZE (4 * 11 + 4 * 10)
#endif
#ifdef CONFIG_ARCH_MSM_KRAIT
#define SCM_SVC_BOOT 0x1
#define SCM_CMD_TERMINATE_PC 0x2
#endif

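/*
 * msm_arch_idle: simple idle hook -- execute WFI and return once the
 * core is woken by an interrupt.
 */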
ENTRY(msm_arch_idle)
        wfi
        bx      lr

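/*
 * msm_pm_collapse: prepare this CPU for power collapse.
 *
 * Saves r4-r14 plus the cp15 (and, when enabled, AVS) registers into the
 * per-CPU slot of saved_state, flushes the data cache as required, and
 * then requests power collapse (via SCM on Krait, via WFI otherwise).
 * If the collapse does not happen and execution falls through, the saved
 * registers are reloaded and 0 is returned; a real collapse resumes in
 * msm_pm_collapse_exit, which returns 1 to the same caller.
 */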
ENTRY(msm_pm_collapse)
#if defined(CONFIG_MSM_FIQ_SUPPORT)
        cpsid   f
#endif

        ldr     r0, =saved_state
#if (NR_CPUS >= 2)
        mrc     p15, 0, r1, c0, c0, 5   /* MPIDR */
        ands    r1, r1, #15             /* What CPU am I */
        addne   r0, r0, #CPU_SAVED_STATE_SIZE
#endif

        stmia   r0!, {r4-r14}
        mrc     p15, 0, r1, c1, c0, 0   /* MMU control */
        mrc     p15, 0, r2, c2, c0, 0   /* TTBR0 */
        mrc     p15, 0, r3, c3, c0, 0   /* dacr */
#ifdef CONFIG_ARCH_MSM_SCORPION
        /* This instruction is not valid for non-Scorpion processors */
        mrc     p15, 3, r4, c15, c0, 3  /* L2CR1 is the L2 cache control reg 1 */
#endif
        mrc     p15, 0, r5, c10, c2, 0  /* PRRR */
        mrc     p15, 0, r6, c10, c2, 1  /* NMRR */
        mrc     p15, 0, r7, c1, c0, 1   /* ACTLR */
        mrc     p15, 0, r8, c2, c0, 1   /* TTBR1 */
        mrc     p15, 0, r9, c13, c0, 3  /* TPIDRURO */
        mrc     p15, 0, ip, c13, c0, 1  /* context ID */
        stmia   r0!, {r1-r9, ip}
#ifdef CONFIG_MSM_CPU_AVS
        mrc     p15, 7, r1, c15, c1, 7  /* AVSCSR: Adaptive Voltage Scaling
                                         * Control and Status Register */
        mrc     p15, 7, r2, c15, c0, 6  /* AVSDSCR: Adaptive Voltage Scaling
                                         * Delay Synthesizer Control Register */
#ifndef CONFIG_ARCH_MSM_KRAIT
        mrc     p15, 7, r3, c15, c1, 0  /* TSCSR: Temperature Status and
                                         * Control Register */
#endif

        stmia   r0!, {r1-r3}
#endif

#ifdef CONFIG_MSM_DEBUG_ACROSS_PC
        bl      msm_save_jtag_debug
#endif
#ifdef CONFIG_MSM_TRACE_ACROSS_PC
        bl      etm_save_reg_check
#endif

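/*
 * Decide how much cache to flush before collapsing: flush the entire
 * data cache if msm_pm_flush_l2_flag is set or if the level 1 data
 * cache (CSSELR = 0) reports write-back support in CCSIDR; otherwise
 * only the saved_state area is written back below.
 */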
        ldr     r0, =msm_pm_flush_l2_flag
        ldr     r0, [r0]
        mov     r1, #0
        mcr     p15, 2, r1, c0, c0, 0   /* CSSELR: select L1 data cache */
        mrc     p15, 1, r1, c0, c0, 0   /* CCSIDR */
        mov     r2, #1
        and     r1, r2, r1, ASR #30     /* check if the cache is write-back */
        orr     r1, r0, r1
        cmp     r1, #1
        bne     skip
        bl      v7_flush_dcache_all

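/*
 * Always write the just-saved context back to memory: the warm-boot
 * path in msm_pm_collapse_exit runs with the caches disabled, so it
 * must find saved_state in DRAM rather than in dirty cache lines.
 */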
skip:   ldr     r0, =saved_state
        ldr     r1, =saved_state_end
        sub     r1, r1, r0
        bl      v7_flush_kern_dcache_area

        mrc     p15, 0, r4, c1, c0, 0   /* read current CR */
        bic     r0, r4, #(1 << 2)       /* clear dcache bit */
        bic     r0, r0, #(1 << 12)      /* clear icache bit */
        mcr     p15, 0, r0, c1, c0, 0   /* disable d/i cache */

        dsb
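/*
 * Request power collapse.  On Krait this goes through the secure
 * monitor (scm_call_atomic1 with SCM_SVC_BOOT/SCM_CMD_TERMINATE_PC);
 * other targets simply execute WFI.
 */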
#ifdef CONFIG_ARCH_MSM_KRAIT
        ldr     r0, =SCM_SVC_BOOT
        ldr     r1, =SCM_CMD_TERMINATE_PC
        ldr     r2, =0
        bl      scm_call_atomic1
#else
        wfi
#endif
        mcr     p15, 0, r4, c1, c0, 0   /* restore d/i cache */
        isb

#if defined(CONFIG_MSM_FIQ_SUPPORT)
        cpsie   f
#endif
#ifdef CONFIG_MSM_TRACE_ACROSS_PC
        bl      etm_restore_reg_check
#endif
#ifdef CONFIG_MSM_DEBUG_ACROSS_PC
        bl      msm_restore_jtag_debug
#endif
        ldr     r0, =saved_state        /* restore registers */
#if (NR_CPUS >= 2)
        mrc     p15, 0, r1, c0, c0, 5   /* MPIDR */
        ands    r1, r1, #15             /* What CPU am I */
        addne   r0, r0, #CPU_SAVED_STATE_SIZE
#endif

        ldmfd   r0, {r4-r14}
        mov     r0, #0                  /* return 0: power collapse failed */
        bx      lr

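/*
 * msm_pm_collapse_exit: warm-boot return path after power collapse.
 *
 * Entered from the boot vector at a physical address with the MMU and
 * caches off.  The code restores the cp15 state and r4-r14 from
 * saved_state, temporarily installs the page table published in
 * msm_pm_pc_pgd so the MMU can be enabled while still running from
 * physical addresses, jumps back to the kernel's virtual mapping, and
 * finally returns 1 through the lr saved by msm_pm_collapse.
 */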
ENTRY(msm_pm_collapse_exit)
#if 0 /* serial debug */
        mov     r0, #0x80000016
        mcr     p15, 0, r0, c15, c2, 4
        mov     r0, #0xA9000000
        add     r0, r0, #0x00A00000     /* UART1 */
        /*add   r0, r0, #0x00C00000*/   /* UART3 */
        mov     r1, #'A'
        str     r1, [r0, #0x00C]
#endif
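/*
 * Compute the physical address of saved_state_end: "ldr =" yields the
 * link-time (virtual) address, while "adr" yields the current run-time
 * (physical) address of msm_pm_collapse_exit, so adding their
 * difference relocates the pointer for use before the MMU is enabled.
 */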
        ldr     r1, =saved_state_end
        ldr     r2, =msm_pm_collapse_exit
        adr     r3, msm_pm_collapse_exit
        add     r1, r1, r3
        sub     r1, r1, r2
#if (NR_CPUS >= 2)
        mrc     p15, 0, r2, c0, c0, 5   /* MPIDR */
        ands    r2, r2, #15             /* What CPU am I */
        subeq   r1, r1, #CPU_SAVED_STATE_SIZE
#endif

#ifdef CONFIG_MSM_CPU_AVS
        ldmdb   r1!, {r2-r4}
#ifndef CONFIG_ARCH_MSM_KRAIT
        mcr     p15, 7, r4, c15, c1, 0  /* TSCSR */
#endif
        mcr     p15, 7, r3, c15, c0, 6  /* AVSDSCR */
        mcr     p15, 7, r2, c15, c1, 7  /* AVSCSR */
#endif
        ldmdb   r1!, {r2-r11}
        mcr     p15, 0, r4, c3, c0, 0   /* dacr */
        mcr     p15, 0, r3, c2, c0, 0   /* TTBR0 */
#ifdef CONFIG_ARCH_MSM_SCORPION
        /* This instruction is not valid for non-Scorpion processors */
        mcr     p15, 3, r5, c15, c0, 3  /* L2CR1 */
#endif
        mcr     p15, 0, r6, c10, c2, 0  /* PRRR */
        mcr     p15, 0, r7, c10, c2, 1  /* NMRR */
        mcr     p15, 0, r8, c1, c0, 1   /* ACTLR */
        mcr     p15, 0, r9, c2, c0, 1   /* TTBR1 */
        mcr     p15, 0, r10, c13, c0, 3 /* TPIDRURO */
        mcr     p15, 0, r11, c13, c0, 1 /* context ID */
        isb
        ldmdb   r1!, {r4-r14}
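/*
 * Switch TTBR0 to the page table whose physical address the platform
 * PM code publishes in msm_pm_pc_pgd (assumed to identity-map this
 * region), keeping the current TTB flags, so the MMU can be enabled
 * while still executing from physical addresses.  Once running in the
 * virtual mapping, the original TTBR0 is restored.
 */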
        ldr     r0, =msm_pm_pc_pgd
        ldr     r1, =msm_pm_collapse_exit
        adr     r3, msm_pm_collapse_exit
        add     r0, r0, r3
        sub     r0, r0, r1
        ldr     r0, [r0]
        mrc     p15, 0, r1, c2, c0, 0   /* save current TTBR0 */
        and     r3, r1, #0x7f           /* mask to get TTB flags */
        orr     r0, r0, r3              /* add TTB flags to the new TTBR0 value */
        mcr     p15, 0, r0, c2, c0, 0   /* temporarily switch TTBR0 */
        isb
        mcr     p15, 0, r2, c1, c0, 0   /* MMU control */
        isb
msm_pm_mapped_pa:
        /* Switch to virtual */
        ldr     r0, =msm_pm_pa_to_va
        mov     pc, r0
msm_pm_pa_to_va:
        mcr     p15, 0, r1, c2, c0, 0   /* restore TTBR0 */
        isb
        mcr     p15, 0, r3, c8, c7, 0   /* UTLBIALL */
        mcr     p15, 0, r3, c7, c5, 6   /* BPIALL */
        dsb

        isb
        stmfd   sp!, {lr}
        bl      v7_flush_kern_cache_all
#ifdef CONFIG_MSM_TRACE_ACROSS_PC
        bl      etm_restore_reg_check
#endif
#ifdef CONFIG_MSM_DEBUG_ACROSS_PC
        bl      msm_restore_jtag_debug
#endif
        ldmfd   sp!, {lr}
        mov     r0, #1                  /* return 1: power collapse succeeded */
        bx      lr
        nop
        nop
        nop
        nop
        nop
1:      b       1b

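/*
 * msm_pm_boot_entry: first code executed by a CPU on warm boot.  Runs
 * at a physical address with the MMU off: read the CPU id from MPIDR,
 * translate msm_pm_boot_vector to its physical address, and jump to
 * the entry point previously stored for this CPU.
 */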
ENTRY(msm_pm_boot_entry)
        mrc     p15, 0, r0, c0, c0, 5   /* MPIDR */
        and     r0, r0, #15             /* what CPU am I */

        ldr     r1, =msm_pm_boot_vector
        ldr     r2, =msm_pm_boot_entry
        adr     r3, msm_pm_boot_entry
        add     r1, r1, r3              /* translate virt to phys addr */
        sub     r1, r1, r2

        add     r1, r1, r0, LSL #2      /* locate boot vector for our cpu */
        ldr     pc, [r1]                /* jump */

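/*
 * msm_pm_write_boot_vector(cpu, address): store the warm-boot entry
 * address for the given CPU and flush the 4-byte vector entry from the
 * data cache, so a CPU that wakes with caches disabled reads the
 * updated value from memory.
 */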
ENTRY(msm_pm_write_boot_vector)
        ldr     r2, =msm_pm_boot_vector
        add     r2, r2, r0, LSL #2      /* locate boot vector for our cpu */
        str     r1, [r2]
        mov     r0, r2
        ldr     r1, =4
        stmfd   sp!, {lr}
        bl      v7_flush_kern_dcache_area
        ldmfd   sp!, {lr}
        bx      lr

ENTRY(msm_pm_set_l2_flush_flag)
        ldr     r1, =msm_pm_flush_l2_flag
        str     r0, [r1]
        bx      lr

        .data

        .globl msm_pm_pc_pgd
msm_pm_pc_pgd:
        .long   0x0

saved_state:
#if (NR_CPUS >= 2)
        .space  CPU_SAVED_STATE_SIZE * 2        /* This code only supports 2 cores */
#else
        .space  CPU_SAVED_STATE_SIZE
#endif
saved_state_end:

msm_pm_boot_vector:
        .space  4 * NR_CPUS

/*
 * Default the L2 flush flag to 1 so that caches are flushed during power
 * collapse, unless the L2 driver decides to flush them only during L2
 * power collapse.
 */
msm_pm_flush_l2_flag:
        .long   0x1