/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
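
/*
 * Reset handlers for the Q6 cores (LPASS and modem FW/SW), RIVA (WCNSS),
 * DSPS and TZApps images, used by the MSM peripheral image loader (PIL).
 * Each peripheral has untrusted handlers that program the hardware
 * directly and trusted handlers that defer to the secure PAS interface.
 */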

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/elf.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_device.h>

#include <asm/mach-types.h>

#include <mach/msm_iomap.h>
#include <mach/scm.h>

#include "peripheral-loader.h"
#include "scm-pas.h"

#define MSM_FW_QDSP6SS_PHYS	0x08800000
#define MSM_SW_QDSP6SS_PHYS	0x08900000
#define MSM_LPASS_QDSP6SS_PHYS	0x28800000
#define MSM_MSS_ENABLE_PHYS	0x08B00000

#define QDSP6SS_RST_EVB		0x0
#define QDSP6SS_RESET		0x04
#define QDSP6SS_CGC_OVERRIDE	0x18
#define QDSP6SS_STRAP_TCM	0x1C
#define QDSP6SS_STRAP_AHB	0x20
#define QDSP6SS_GFMUX_CTL	0x30
#define QDSP6SS_PWR_CTL		0x38

#define MSS_S_HCLK_CTL		(MSM_CLK_CTL_BASE + 0x2C70)
#define MSS_SLP_CLK_CTL		(MSM_CLK_CTL_BASE + 0x2C60)
#define SFAB_MSS_M_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2340)
#define SFAB_MSS_S_HCLK_CTL	(MSM_CLK_CTL_BASE + 0x2C00)
#define SFAB_MSS_Q6_FW_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2044)
#define SFAB_MSS_Q6_SW_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2040)
#define SFAB_LPASS_Q6_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x23A0)
#define MSS_Q6FW_JTAG_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2C6C)
#define MSS_Q6SW_JTAG_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2C68)
#define MSS_RESET		(MSM_CLK_CTL_BASE + 0x2C64)

#define Q6SS_SS_ARES		BIT(0)
#define Q6SS_CORE_ARES		BIT(1)
#define Q6SS_ISDB_ARES		BIT(2)
#define Q6SS_ETM_ARES		BIT(3)
#define Q6SS_STOP_CORE_ARES	BIT(4)
#define Q6SS_PRIV_ARES		BIT(5)

#define Q6SS_L2DATA_SLP_NRET_N	BIT(0)
#define Q6SS_SLP_RET_N		BIT(1)
#define Q6SS_L1TCM_SLP_NRET_N	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N	BIT(3)
#define Q6SS_ETB_SLEEP_NRET_N	BIT(4)
#define Q6SS_ARR_STBY_N		BIT(5)
#define Q6SS_CLAMP_IO		BIT(6)

#define Q6SS_CLK_ENA		BIT(1)
#define Q6SS_SRC_SWITCH_CLK_OVR	BIT(8)
#define Q6SS_AXIS_ACLK_EN	BIT(9)

#define MSM_RIVA_PHYS			0x03204000
#define RIVA_PMU_A2XB_CFG		(msm_riva_base + 0xB8)
#define RIVA_PMU_A2XB_CFG_EN		BIT(0)

#define RIVA_PMU_CFG			(msm_riva_base + 0x28)
#define RIVA_PMU_CFG_WARM_BOOT		BIT(0)
#define RIVA_PMU_CFG_IRIS_XO_MODE	0x6
#define RIVA_PMU_CFG_IRIS_XO_MODE_48	(3 << 1)

#define RIVA_PMU_OVRD_VAL		(msm_riva_base + 0x30)
#define RIVA_PMU_OVRD_VAL_CCPU_RESET	BIT(0)
#define RIVA_PMU_OVRD_VAL_CCPU_CLK	BIT(1)

#define RIVA_PMU_CCPU_CTL		(msm_riva_base + 0x9C)
#define RIVA_PMU_CCPU_CTL_HIGH_IVT	BIT(0)
#define RIVA_PMU_CCPU_CTL_REMAP_EN	BIT(2)

#define RIVA_PMU_CCPU_BOOT_REMAP_ADDR	(msm_riva_base + 0xA0)

#define RIVA_PLL_MODE			(MSM_CLK_CTL_BASE + 0x31A0)
#define PLL_MODE_OUTCTRL		BIT(0)
#define PLL_MODE_BYPASSNL		BIT(1)
#define PLL_MODE_RESET_N		BIT(2)
#define PLL_MODE_REF_XO_SEL		0x30
#define PLL_MODE_REF_XO_SEL_CXO		(2 << 4)
#define PLL_MODE_REF_XO_SEL_RF		(3 << 4)
#define RIVA_PLL_L_VAL			(MSM_CLK_CTL_BASE + 0x31A4)
#define RIVA_PLL_M_VAL			(MSM_CLK_CTL_BASE + 0x31A8)
#define RIVA_PLL_N_VAL			(MSM_CLK_CTL_BASE + 0x31AC)
#define RIVA_PLL_CONFIG			(MSM_CLK_CTL_BASE + 0x31B4)
#define RIVA_PLL_STATUS			(MSM_CLK_CTL_BASE + 0x31B8)

#define RIVA_PMU_ROOT_CLK_SEL		(msm_riva_base + 0xC8)
#define RIVA_PMU_ROOT_CLK_SEL_3		BIT(2)

#define RIVA_PMU_CLK_ROOT3		(msm_riva_base + 0x78)
#define RIVA_PMU_CLK_ROOT3_ENA		BIT(0)
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV	0x3C
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV_2	(1 << 2)
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL	0x1C0
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA	(1 << 6)
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV	0x1E00
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV_2	(1 << 9)
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL	0xE000
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA	(1 << 13)

#define PPSS_RESET			(MSM_CLK_CTL_BASE + 0x2594)
#define PPSS_PROC_CLK_CTL		(MSM_CLK_CTL_BASE + 0x2588)
#define PPSS_HCLK_CTL			(MSM_CLK_CTL_BASE + 0x2580)

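/**
 * struct q6_data - per-QDSP6 core configuration and state
 * @strap_tcm_base:	value programmed into QDSP6SS_STRAP_TCM
 * @strap_ahb_upper:	upper portion of the QDSP6SS_STRAP_AHB value
 * @strap_ahb_lower:	lower portion of the QDSP6SS_STRAP_AHB value
 * @reg_base_phys:	physical address of the core's QDSP6SS registers
 * @reg_base:		ioremapped QDSP6SS register base
 * @aclk_reg:		AHB clock control register for this core
 * @jtag_clk_reg:	JTAG clock control register (modem cores only)
 * @start_addr:		entry point taken from the firmware ELF header
 * @vreg:		core supply regulator
 * @vreg_enabled:	true while @vreg is enabled
 * @name:		name used for regulator lookup and error messages
 */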
struct q6_data {
	const unsigned strap_tcm_base;
	const unsigned strap_ahb_upper;
	const unsigned strap_ahb_lower;
	const unsigned reg_base_phys;
	void __iomem *reg_base;
	void __iomem *aclk_reg;
	void __iomem *jtag_clk_reg;
	int start_addr;
	struct regulator *vreg;
	bool vreg_enabled;
	const char *name;
};

static struct q6_data q6_lpass = {
	.strap_tcm_base = (0x146 << 16),
	.strap_ahb_upper = (0x029 << 16),
	.strap_ahb_lower = (0x028 << 4),
	.reg_base_phys = MSM_LPASS_QDSP6SS_PHYS,
	.aclk_reg = SFAB_LPASS_Q6_ACLK_CTL,
	.name = "q6_lpass",
};

static struct q6_data q6_modem_fw = {
	.strap_tcm_base = (0x40 << 16),
	.strap_ahb_upper = (0x09 << 16),
	.strap_ahb_lower = (0x08 << 4),
	.reg_base_phys = MSM_FW_QDSP6SS_PHYS,
	.aclk_reg = SFAB_MSS_Q6_FW_ACLK_CTL,
	.jtag_clk_reg = MSS_Q6FW_JTAG_CLK_CTL,
	.name = "q6_modem_fw",
};

static struct q6_data q6_modem_sw = {
	.strap_tcm_base = (0x42 << 16),
	.strap_ahb_upper = (0x09 << 16),
	.strap_ahb_lower = (0x08 << 4),
	.reg_base_phys = MSM_SW_QDSP6SS_PHYS,
	.aclk_reg = SFAB_MSS_Q6_SW_ACLK_CTL,
	.jtag_clk_reg = MSS_Q6SW_JTAG_CLK_CTL,
	.name = "q6_modem_sw",
};

static void __iomem *mss_enable_reg;
static void __iomem *msm_riva_base;
static unsigned long riva_start;

static int init_image_lpass_q6_trusted(struct pil_desc *pil,
		const u8 *metadata, size_t size)
{
	return pas_init_image(PAS_Q6, metadata, size);
}

static int init_image_modem_fw_q6_trusted(struct pil_desc *pil,
		const u8 *metadata, size_t size)
{
	return pas_init_image(PAS_MODEM_FW, metadata, size);
}

static int init_image_modem_sw_q6_trusted(struct pil_desc *pil,
		const u8 *metadata, size_t size)
{
	return pas_init_image(PAS_MODEM_SW, metadata, size);
}

static int init_image_lpass_q6_untrusted(struct pil_desc *pil,
		const u8 *metadata, size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	q6_lpass.start_addr = ehdr->e_entry;
	return 0;
}

static int init_image_modem_fw_q6_untrusted(struct pil_desc *pil,
		const u8 *metadata, size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	q6_modem_fw.start_addr = ehdr->e_entry;
	return 0;
}

static int init_image_modem_sw_q6_untrusted(struct pil_desc *pil,
		const u8 *metadata, size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	q6_modem_sw.start_addr = ehdr->e_entry;
	return 0;
}

static int verify_blob(struct pil_desc *pil, u32 phy_addr, size_t size)
{
	return 0;
}

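/*
 * Power the Q6 core rail: set it to 1.05 V, request a 100 mA load,
 * then enable the regulator before the core is released from reset.
 */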
static int power_up_q6(struct q6_data *q6)
{
	int err;

	err = regulator_set_voltage(q6->vreg, 1050000, 1050000);
	if (err) {
		pr_err("Failed to set %s regulator's voltage.\n", q6->name);
		return err;
	}
	err = regulator_set_optimum_mode(q6->vreg, 100000);
	if (err < 0) {
		pr_err("Failed to set %s regulator's mode.\n", q6->name);
		return err;
	}
	err = regulator_enable(q6->vreg);
	if (err) {
		pr_err("Failed to enable %s's regulator.\n", q6->name);
		return err;
	}
	q6->vreg_enabled = true;
	return 0;
}

static int reset_q6_trusted(int id, struct q6_data *q6)
{
	int err = power_up_q6(q6);
	if (err)
		return err;
	return pas_auth_and_reset(id);
}

static int reset_lpass_q6_trusted(struct pil_desc *pil)
{
	return reset_q6_trusted(PAS_Q6, &q6_lpass);
}

static int reset_modem_fw_q6_trusted(struct pil_desc *pil)
{
	return reset_q6_trusted(PAS_MODEM_FW, &q6_modem_fw);
}

static int reset_modem_sw_q6_trusted(struct pil_desc *pil)
{
	return reset_q6_trusted(PAS_MODEM_SW, &q6_modem_sw);
}

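/*
 * Bring a Q6 core out of reset without secure-world (PAS) assistance:
 * power the core rail, enable the bus and (for the modem cores) MSS
 * clocks, program the boot address and TCM/AHB strap registers, cycle
 * the subsystem resets, restore power to the core memories, and finally
 * release the core so it starts executing from start_addr.
 */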
static int reset_q6_untrusted(struct q6_data *q6)
{
	u32 reg;
	int err;

	err = power_up_q6(q6);
	if (err)
		return err;
	/* Enable Q6 ACLK */
	writel_relaxed(0x10, q6->aclk_reg);

	if (q6 == &q6_modem_fw || q6 == &q6_modem_sw) {
		/* Enable MSS clocks */
		writel_relaxed(0x10, SFAB_MSS_M_ACLK_CTL);
		writel_relaxed(0x10, SFAB_MSS_S_HCLK_CTL);
		writel_relaxed(0x10, MSS_S_HCLK_CTL);
		writel_relaxed(0x10, MSS_SLP_CLK_CTL);
		/* Wait for clocks to enable */
		mb();
		udelay(10);

		/* Enable JTAG clocks */
		/* TODO: Remove if/when Q6 software enables them? */
		writel_relaxed(0x10, q6->jtag_clk_reg);

		/* De-assert MSS reset */
		writel_relaxed(0x0, MSS_RESET);
		mb();
		udelay(10);

		/* Enable MSS */
		writel_relaxed(0x7, mss_enable_reg);
	}

	/*
	 * Assert AXIS_ACLK_EN override to allow for correct updating of the
	 * QDSP6_CORE_STATE status bit. This is mandatory only for the SW Q6
	 * in 8960v1 and optional elsewhere.
	 */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_CGC_OVERRIDE);
	reg |= Q6SS_AXIS_ACLK_EN;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_CGC_OVERRIDE);

	/* Deassert Q6SS_SS_ARES */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_RESET);
	reg &= ~(Q6SS_SS_ARES);
	writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);

	/* Program boot address */
	writel_relaxed((q6->start_addr >> 8) & 0xFFFFFF,
			q6->reg_base + QDSP6SS_RST_EVB);

	/* Program TCM and AHB address ranges */
	writel_relaxed(q6->strap_tcm_base, q6->reg_base + QDSP6SS_STRAP_TCM);
	writel_relaxed(q6->strap_ahb_upper | q6->strap_ahb_lower,
			q6->reg_base + QDSP6SS_STRAP_AHB);

	/* Turn off Q6 core clock */
	writel_relaxed(Q6SS_SRC_SWITCH_CLK_OVR,
			q6->reg_base + QDSP6SS_GFMUX_CTL);

	/* Put memories to sleep */
	writel_relaxed(Q6SS_CLAMP_IO, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Assert resets */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_RESET);
	reg |= (Q6SS_CORE_ARES | Q6SS_ISDB_ARES | Q6SS_ETM_ARES
		| Q6SS_STOP_CORE_ARES);
	writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);

	/* Wait 8 AHB cycles for Q6 to be fully reset (AHB = 1.5 MHz) */
	mb();
	usleep_range(20, 30);

	/* Turn on Q6 memories */
	reg = Q6SS_L2DATA_SLP_NRET_N | Q6SS_SLP_RET_N | Q6SS_L1TCM_SLP_NRET_N
		| Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLEEP_NRET_N
		| Q6SS_ARR_STBY_N | Q6SS_CLAMP_IO;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Turn on Q6 core clock */
	reg = Q6SS_CLK_ENA | Q6SS_SRC_SWITCH_CLK_OVR;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_GFMUX_CTL);

	/* Remove Q6SS_CLAMP_IO */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_PWR_CTL);
	reg &= ~Q6SS_CLAMP_IO;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Bring Q6 core out of reset and start execution. */
	writel_relaxed(0x0, q6->reg_base + QDSP6SS_RESET);

	/*
	 * Re-enable auto-gating of AXIS_ACLK at least one AXI clock cycle
	 * after resets are de-asserted.
	 */
	mb();
	usleep_range(1, 10);
	reg = readl_relaxed(q6->reg_base + QDSP6SS_CGC_OVERRIDE);
	reg &= ~Q6SS_AXIS_ACLK_EN;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_CGC_OVERRIDE);

	return 0;
}

static int reset_lpass_q6_untrusted(struct pil_desc *pil)
{
	return reset_q6_untrusted(&q6_lpass);
}

static int reset_modem_fw_q6_untrusted(struct pil_desc *pil)
{
	return reset_q6_untrusted(&q6_modem_fw);
}

static int reset_modem_sw_q6_untrusted(struct pil_desc *pil)
{
	return reset_q6_untrusted(&q6_modem_sw);
}

static int shutdown_q6_trusted(int id, struct q6_data *q6)
{
	int ret;

	ret = pas_shutdown(id);
	if (ret)
		return ret;

	if (q6->vreg_enabled) {
		regulator_disable(q6->vreg);
		q6->vreg_enabled = false;
	}

	return ret;
}

static int shutdown_lpass_q6_trusted(struct pil_desc *pil)
{
	return shutdown_q6_trusted(PAS_Q6, &q6_lpass);
}

static int shutdown_modem_fw_q6_trusted(struct pil_desc *pil)
{
	return shutdown_q6_trusted(PAS_MODEM_FW, &q6_modem_fw);
}

static int shutdown_modem_sw_q6_trusted(struct pil_desc *pil)
{
	return shutdown_q6_trusted(PAS_MODEM_SW, &q6_modem_sw);
}

static int shutdown_q6_untrusted(struct q6_data *q6)
{
	u32 reg;

	/* Turn off Q6 core clock */
	writel_relaxed(Q6SS_SRC_SWITCH_CLK_OVR,
			q6->reg_base + QDSP6SS_GFMUX_CTL);

	/* Assert resets */
	reg = (Q6SS_SS_ARES | Q6SS_CORE_ARES | Q6SS_ISDB_ARES
		| Q6SS_ETM_ARES | Q6SS_STOP_CORE_ARES | Q6SS_PRIV_ARES);
	writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);

	/* Turn off Q6 memories */
	writel_relaxed(Q6SS_CLAMP_IO, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Put Modem Subsystem back into reset when shutting down FWQ6 */
	if (q6 == &q6_modem_fw)
		writel_relaxed(0x1, MSS_RESET);

	if (q6->vreg_enabled) {
		regulator_disable(q6->vreg);
		q6->vreg_enabled = false;
	}

	return 0;
}

static int shutdown_lpass_q6_untrusted(struct pil_desc *pil)
{
	return shutdown_q6_untrusted(&q6_lpass);
}

static int shutdown_modem_fw_q6_untrusted(struct pil_desc *pil)
{
	return shutdown_q6_untrusted(&q6_modem_fw);
}

static int shutdown_modem_sw_q6_untrusted(struct pil_desc *pil)
{
	return shutdown_q6_untrusted(&q6_modem_sw);
}

static int init_image_riva_untrusted(struct pil_desc *pil, const u8 *metadata,
		size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	riva_start = ehdr->e_entry;
	return 0;
}

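/*
 * Cold-boot the RIVA (WCNSS) cCPU without secure-world assistance:
 * enable the A2XB bridge, program PLL 13 for 960 MHz based on the IRIS
 * XO mode, switch the cCPU root clock to 240 MHz, remap the boot vector
 * to the downloaded image, and release the cCPU from reset.
 */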
static int reset_riva_untrusted(struct pil_desc *pil)
{
	u32 reg;
	bool xo;

	/* Enable A2XB bridge */
	reg = readl(RIVA_PMU_A2XB_CFG);
	reg |= RIVA_PMU_A2XB_CFG_EN;
	writel(reg, RIVA_PMU_A2XB_CFG);

	/* Determine which XO to use */
	reg = readl(RIVA_PMU_CFG);
	xo = (reg & RIVA_PMU_CFG_IRIS_XO_MODE) == RIVA_PMU_CFG_IRIS_XO_MODE_48;

	/* Program PLL 13 to 960 MHz */
	reg = readl(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_BYPASSNL | PLL_MODE_OUTCTRL | PLL_MODE_RESET_N);
	writel(reg, RIVA_PLL_MODE);

	if (xo)
		writel(0x40000C00 | 40, RIVA_PLL_L_VAL);
	else
		writel(0x40000C00 | 50, RIVA_PLL_L_VAL);
	writel(0, RIVA_PLL_M_VAL);
	writel(1, RIVA_PLL_N_VAL);
	writel_relaxed(0x01495227, RIVA_PLL_CONFIG);

	reg = readl(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_REF_XO_SEL);
	reg |= xo ? PLL_MODE_REF_XO_SEL_RF : PLL_MODE_REF_XO_SEL_CXO;
	writel(reg, RIVA_PLL_MODE);

	/* Enable PLL 13 */
	reg |= PLL_MODE_BYPASSNL;
	writel(reg, RIVA_PLL_MODE);

	usleep_range(10, 20);

	reg |= PLL_MODE_RESET_N;
	writel(reg, RIVA_PLL_MODE);
	reg |= PLL_MODE_OUTCTRL;
	writel(reg, RIVA_PLL_MODE);

	/* Wait for PLL to settle */
	usleep_range(50, 100);

	/* Configure cCPU for 240 MHz */
	reg = readl(RIVA_PMU_CLK_ROOT3);
	if (readl(RIVA_PMU_ROOT_CLK_SEL) & RIVA_PMU_ROOT_CLK_SEL_3) {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC0_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC0_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC0_DIV_2;
	} else {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC1_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC1_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC1_DIV_2;
	}
	writel(reg, RIVA_PMU_CLK_ROOT3);
	reg |= RIVA_PMU_CLK_ROOT3_ENA;
	writel(reg, RIVA_PMU_CLK_ROOT3);
	reg = readl(RIVA_PMU_ROOT_CLK_SEL);
	reg ^= RIVA_PMU_ROOT_CLK_SEL_3;
	writel(reg, RIVA_PMU_ROOT_CLK_SEL);

	/* Use the high vector table */
	reg = readl(RIVA_PMU_CCPU_CTL);
	reg |= RIVA_PMU_CCPU_CTL_HIGH_IVT | RIVA_PMU_CCPU_CTL_REMAP_EN;
	writel(reg, RIVA_PMU_CCPU_CTL);

	/* Set base memory address */
	writel_relaxed(riva_start >> 16, RIVA_PMU_CCPU_BOOT_REMAP_ADDR);

	/* Clear warmboot bit indicating this is a cold boot */
	reg = readl(RIVA_PMU_CFG);
	reg &= ~(RIVA_PMU_CFG_WARM_BOOT);
	writel(reg, RIVA_PMU_CFG);

	/* Enable the cCPU clock */
	reg = readl(RIVA_PMU_OVRD_VAL);
	reg |= RIVA_PMU_OVRD_VAL_CCPU_CLK;
	writel(reg, RIVA_PMU_OVRD_VAL);

	/* Take cCPU out of reset */
	reg |= RIVA_PMU_OVRD_VAL_CCPU_RESET;
	writel(reg, RIVA_PMU_OVRD_VAL);

	return 0;
}

static int shutdown_riva_untrusted(struct pil_desc *pil)
{
	u32 reg;

	/* Put riva into reset */
	reg = readl(RIVA_PMU_OVRD_VAL);
	reg &= ~(RIVA_PMU_OVRD_VAL_CCPU_RESET | RIVA_PMU_OVRD_VAL_CCPU_CLK);
	writel(reg, RIVA_PMU_OVRD_VAL);
	return 0;
}

static int init_image_riva_trusted(struct pil_desc *pil, const u8 *metadata,
		size_t size)
{
	return pas_init_image(PAS_RIVA, metadata, size);
}

static int reset_riva_trusted(struct pil_desc *pil)
{
	return pas_auth_and_reset(PAS_RIVA);
}

static int shutdown_riva_trusted(struct pil_desc *pil)
{
	return pas_shutdown(PAS_RIVA);
}

static int init_image_dsps_untrusted(struct pil_desc *pil, const u8 *metadata,
		size_t size)
{
	/* Bring memory and bus interface out of reset */
	writel_relaxed(0x2, PPSS_RESET);
	writel_relaxed(0x10, PPSS_HCLK_CTL);
	return 0;
}

static int reset_dsps_untrusted(struct pil_desc *pil)
{
	writel_relaxed(0x10, PPSS_PROC_CLK_CTL);
	/* Bring DSPS out of reset */
	writel_relaxed(0x0, PPSS_RESET);
	return 0;
}

static int shutdown_dsps_untrusted(struct pil_desc *pil)
{
	writel_relaxed(0x2, PPSS_RESET);
	writel_relaxed(0x0, PPSS_PROC_CLK_CTL);
	return 0;
}

static int init_image_dsps_trusted(struct pil_desc *pil, const u8 *metadata,
		size_t size)
{
	return pas_init_image(PAS_DSPS, metadata, size);
}

static int reset_dsps_trusted(struct pil_desc *pil)
{
	return pas_auth_and_reset(PAS_DSPS);
}

static int shutdown_dsps_trusted(struct pil_desc *pil)
{
	return pas_shutdown(PAS_DSPS);
}

static int init_image_tzapps(struct pil_desc *pil, const u8 *metadata,
		size_t size)
{
	return pas_init_image(PAS_TZAPPS, metadata, size);
}

static int reset_tzapps(struct pil_desc *pil)
{
	return pas_auth_and_reset(PAS_TZAPPS);
}

static int shutdown_tzapps(struct pil_desc *pil)
{
	return pas_shutdown(PAS_TZAPPS);
}

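/*
 * Reset operation tables default to the untrusted handlers;
 * use_secure_pil() swaps in the trusted PAS-based handlers at init time
 * for each peripheral the secure environment supports.
 */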
static struct pil_reset_ops pil_modem_fw_q6_ops = {
	.init_image = init_image_modem_fw_q6_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_modem_fw_q6_untrusted,
	.shutdown = shutdown_modem_fw_q6_untrusted,
};

static struct pil_reset_ops pil_modem_sw_q6_ops = {
	.init_image = init_image_modem_sw_q6_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_modem_sw_q6_untrusted,
	.shutdown = shutdown_modem_sw_q6_untrusted,
};

static struct pil_reset_ops pil_lpass_q6_ops = {
	.init_image = init_image_lpass_q6_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_lpass_q6_untrusted,
	.shutdown = shutdown_lpass_q6_untrusted,
};

static struct pil_reset_ops pil_riva_ops = {
	.init_image = init_image_riva_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_riva_untrusted,
	.shutdown = shutdown_riva_untrusted,
};

struct pil_reset_ops pil_dsps_ops = {
	.init_image = init_image_dsps_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_dsps_untrusted,
	.shutdown = shutdown_dsps_untrusted,
};

struct pil_reset_ops pil_tzapps_ops = {
	.init_image = init_image_tzapps,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_tzapps,
	.shutdown = shutdown_tzapps,
};

static struct platform_device pil_lpass_q6 = {
	.name = "pil_lpass_q6",
};

static struct pil_desc pil_lpass_q6_desc = {
	.name = "q6",
	.dev = &pil_lpass_q6.dev,
	.ops = &pil_lpass_q6_ops,
};

static struct platform_device pil_modem_fw_q6 = {
	.name = "pil_modem_fw_q6",
};

static struct pil_desc pil_modem_fw_q6_desc = {
	.name = "modem_fw",
	.depends_on = "q6",
	.dev = &pil_modem_fw_q6.dev,
	.ops = &pil_modem_fw_q6_ops,
};

static struct platform_device pil_modem_sw_q6 = {
	.name = "pil_modem_sw_q6",
};

static struct pil_desc pil_modem_sw_q6_desc = {
	.name = "modem",
	.depends_on = "modem_fw",
	.dev = &pil_modem_sw_q6.dev,
	.ops = &pil_modem_sw_q6_ops,
};

static struct platform_device pil_riva = {
	.name = "pil_riva",
};

static struct pil_desc pil_riva_desc = {
	.name = "wcnss",
	.dev = &pil_riva.dev,
	.ops = &pil_riva_ops,
};

static struct platform_device pil_dsps = {
	.name = "pil_dsps",
};

static struct pil_desc pil_dsps_desc = {
	.name = "dsps",
	.dev = &pil_dsps.dev,
	.ops = &pil_dsps_ops,
};

static struct platform_device pil_tzapps = {
	.name = "pil_tzapps",
};

static struct pil_desc pil_tzapps_desc = {
	.name = "tzapps",
	.dev = &pil_tzapps.dev,
	.ops = &pil_tzapps_ops,
};

static int __init q6_reset_init(struct q6_data *q6)
{
	int err;

	q6->reg_base = ioremap(q6->reg_base_phys, SZ_256);
	if (!q6->reg_base) {
		err = -ENOMEM;
		goto err_map;
	}

	q6->vreg = regulator_get(NULL, q6->name);
	if (IS_ERR(q6->vreg)) {
		err = PTR_ERR(q6->vreg);
		goto err_vreg;
	}

	return 0;

err_vreg:
	iounmap(q6->reg_base);
err_map:
	return err;
}

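/*
 * For each peripheral that the secure environment can authenticate,
 * replace the untrusted reset handlers with their PAS-based equivalents.
 */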
static void __init use_secure_pil(void)
{
	if (pas_supported(PAS_Q6) > 0) {
		pil_lpass_q6_ops.init_image = init_image_lpass_q6_trusted;
		pil_lpass_q6_ops.auth_and_reset = reset_lpass_q6_trusted;
		pil_lpass_q6_ops.shutdown = shutdown_lpass_q6_trusted;
	}

	if (pas_supported(PAS_MODEM_FW) > 0) {
		pil_modem_fw_q6_ops.init_image = init_image_modem_fw_q6_trusted;
		pil_modem_fw_q6_ops.auth_and_reset = reset_modem_fw_q6_trusted;
		pil_modem_fw_q6_ops.shutdown = shutdown_modem_fw_q6_trusted;
	}

	if (pas_supported(PAS_MODEM_SW) > 0) {
		pil_modem_sw_q6_ops.init_image = init_image_modem_sw_q6_trusted;
		pil_modem_sw_q6_ops.auth_and_reset = reset_modem_sw_q6_trusted;
		pil_modem_sw_q6_ops.shutdown = shutdown_modem_sw_q6_trusted;
	}

	if (pas_supported(PAS_DSPS) > 0) {
		pil_dsps_ops.init_image = init_image_dsps_trusted;
		pil_dsps_ops.auth_and_reset = reset_dsps_trusted;
		pil_dsps_ops.shutdown = shutdown_dsps_trusted;
	}

	if (pas_supported(PAS_RIVA) > 0) {
		pil_riva_ops.init_image = init_image_riva_trusted;
		pil_riva_ops.auth_and_reset = reset_riva_trusted;
		pil_riva_ops.shutdown = shutdown_riva_trusted;
	}
}

static int __init msm_peripheral_reset_init(void)
{
	int err;

	/*
	 * Don't initialize PIL on simulated targets, as some
	 * subsystems may not be emulated on them.
	 */
	if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3())
		return 0;

	use_secure_pil();

	err = q6_reset_init(&q6_lpass);
	if (err)
		return err;
	BUG_ON(platform_device_register(&pil_lpass_q6));
	BUG_ON(msm_pil_register(&pil_lpass_q6_desc));

	mss_enable_reg = ioremap(MSM_MSS_ENABLE_PHYS, 4);
	if (!mss_enable_reg)
		return -ENOMEM;

	err = q6_reset_init(&q6_modem_fw);
	if (err) {
		iounmap(mss_enable_reg);
		return err;
	}
	BUG_ON(platform_device_register(&pil_modem_fw_q6));
	BUG_ON(msm_pil_register(&pil_modem_fw_q6_desc));

	err = q6_reset_init(&q6_modem_sw);
	if (err)
		return err;
	BUG_ON(platform_device_register(&pil_modem_sw_q6));
	BUG_ON(msm_pil_register(&pil_modem_sw_q6_desc));

	BUG_ON(platform_device_register(&pil_dsps));
	BUG_ON(msm_pil_register(&pil_dsps_desc));
	BUG_ON(platform_device_register(&pil_tzapps));
	BUG_ON(msm_pil_register(&pil_tzapps_desc));

	msm_riva_base = ioremap(MSM_RIVA_PHYS, SZ_256);
	if (!msm_riva_base)
		return -ENOMEM;
	BUG_ON(platform_device_register(&pil_riva));
	BUG_ON(msm_pil_register(&pil_riva_desc));

	return 0;
}
arch_initcall(msm_peripheral_reset_init);