/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/elf.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <mach/msm_iomap.h>
#include <asm/mach-types.h>

#include "peripheral-loader.h"

#define MSM_FW_QDSP6SS_PHYS	0x08800000
#define MSM_SW_QDSP6SS_PHYS	0x08900000
#define MSM_LPASS_QDSP6SS_PHYS	0x28800000
#define MSM_MSS_ENABLE_PHYS	0x08B00000

#define QDSP6SS_RST_EVB		0x0
#define QDSP6SS_RESET		0x04
#define QDSP6SS_CGC_OVERRIDE	0x18
#define QDSP6SS_STRAP_TCM	0x1C
#define QDSP6SS_STRAP_AHB	0x20
#define QDSP6SS_GFMUX_CTL	0x30
#define QDSP6SS_PWR_CTL		0x38

#define MSS_S_HCLK_CTL		(MSM_CLK_CTL_BASE + 0x2C70)
#define MSS_SLP_CLK_CTL		(MSM_CLK_CTL_BASE + 0x2C60)
#define SFAB_MSS_M_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2340)
#define SFAB_MSS_S_HCLK_CTL	(MSM_CLK_CTL_BASE + 0x2C00)
#define SFAB_MSS_Q6_FW_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2044)
#define SFAB_MSS_Q6_SW_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2040)
#define SFAB_LPASS_Q6_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x23A0)
#define MSS_Q6FW_JTAG_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2C6C)
#define MSS_Q6SW_JTAG_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2C68)
#define MSS_RESET		(MSM_CLK_CTL_BASE + 0x2C64)

#define Q6SS_SS_ARES		BIT(0)
#define Q6SS_CORE_ARES		BIT(1)
#define Q6SS_ISDB_ARES		BIT(2)
#define Q6SS_ETM_ARES		BIT(3)
#define Q6SS_STOP_CORE_ARES	BIT(4)
#define Q6SS_PRIV_ARES		BIT(5)

#define Q6SS_L2DATA_SLP_NRET_N	BIT(0)
#define Q6SS_SLP_RET_N		BIT(1)
#define Q6SS_L1TCM_SLP_NRET_N	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N	BIT(3)
#define Q6SS_ETB_SLEEP_NRET_N	BIT(4)
#define Q6SS_ARR_STBY_N		BIT(5)
#define Q6SS_CLAMP_IO		BIT(6)

#define Q6SS_CLK_ENA		BIT(1)
#define Q6SS_SRC_SWITCH_CLK_OVR	BIT(8)
#define Q6SS_AXIS_ACLK_EN	BIT(9)

#define MSM_RIVA_PHYS			0x03204000
#define RIVA_PMU_A2XB_CFG		(msm_riva_base + 0xB8)
#define RIVA_PMU_A2XB_CFG_EN		BIT(0)

#define RIVA_PMU_CFG			(msm_riva_base + 0x28)
#define RIVA_PMU_CFG_WARM_BOOT		BIT(0)
#define RIVA_PMU_CFG_IRIS_XO_MODE	0x6
#define RIVA_PMU_CFG_IRIS_XO_MODE_48	(3 << 1)

#define RIVA_PMU_OVRD_VAL		(msm_riva_base + 0x30)
#define RIVA_PMU_OVRD_VAL_CCPU_RESET	BIT(0)
#define RIVA_PMU_OVRD_VAL_CCPU_CLK	BIT(1)

#define RIVA_PMU_CCPU_CTL		(msm_riva_base + 0x9C)
#define RIVA_PMU_CCPU_CTL_HIGH_IVT	BIT(0)
#define RIVA_PMU_CCPU_CTL_REMAP_EN	BIT(2)

#define RIVA_PMU_CCPU_BOOT_REMAP_ADDR	(msm_riva_base + 0xA0)

#define RIVA_PLL_MODE			(MSM_CLK_CTL_BASE + 0x31A0)
#define PLL_MODE_OUTCTRL		BIT(0)
#define PLL_MODE_BYPASSNL		BIT(1)
#define PLL_MODE_RESET_N		BIT(2)
#define PLL_MODE_REF_XO_SEL		0x30
#define PLL_MODE_REF_XO_SEL_CXO		(2 << 4)
#define PLL_MODE_REF_XO_SEL_RF		(3 << 4)
#define RIVA_PLL_L_VAL			(MSM_CLK_CTL_BASE + 0x31A4)
#define RIVA_PLL_M_VAL			(MSM_CLK_CTL_BASE + 0x31A8)
#define RIVA_PLL_N_VAL			(MSM_CLK_CTL_BASE + 0x31AC)
#define RIVA_PLL_CONFIG			(MSM_CLK_CTL_BASE + 0x31B4)
#define RIVA_PLL_STATUS			(MSM_CLK_CTL_BASE + 0x31B8)

#define RIVA_PMU_ROOT_CLK_SEL		(msm_riva_base + 0xC8)
#define RIVA_PMU_ROOT_CLK_SEL_3		BIT(2)

#define RIVA_PMU_CLK_ROOT3		(msm_riva_base + 0x78)
#define RIVA_PMU_CLK_ROOT3_ENA		BIT(0)
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV	0x3C
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV_2	(1 << 2)
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL	0x1C0
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA	(1 << 6)
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV	0x1E00
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV_2	(1 << 9)
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL	0xE000
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA	(1 << 13)

#define PPSS_RESET			(MSM_CLK_CTL_BASE + 0x2594)
#define PPSS_PROC_CLK_CTL		(MSM_CLK_CTL_BASE + 0x2588)
#define PPSS_HCLK_CTL			(MSM_CLK_CTL_BASE + 0x2580)

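/*
 * Per-Q6 state: strap values for the TCM and AHB address ranges, mapped
 * register bases, the boot address captured from the ELF header, and the
 * core's supply regulator.
 */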
struct q6_data {
	const unsigned strap_tcm_base;
	const unsigned strap_ahb_upper;
	const unsigned strap_ahb_lower;
	void __iomem *reg_base;
	void __iomem *aclk_reg;
	void __iomem *jtag_clk_reg;
	int start_addr;
	struct regulator *vreg;
	bool vreg_enabled;
	const char *name;
};

static struct q6_data q6_lpass = {
	.strap_tcm_base = (0x146 << 16),
	.strap_ahb_upper = (0x029 << 16),
	.strap_ahb_lower = (0x028 << 4),
	.aclk_reg = SFAB_LPASS_Q6_ACLK_CTL,
	.name = "q6_lpass",
};

static struct q6_data q6_modem_fw = {
	.strap_tcm_base = (0x40 << 16),
	.strap_ahb_upper = (0x09 << 16),
	.strap_ahb_lower = (0x08 << 4),
	.aclk_reg = SFAB_MSS_Q6_FW_ACLK_CTL,
	.jtag_clk_reg = MSS_Q6FW_JTAG_CLK_CTL,
	.name = "q6_modem_fw",
};

static struct q6_data q6_modem_sw = {
	.strap_tcm_base = (0x42 << 16),
	.strap_ahb_upper = (0x09 << 16),
	.strap_ahb_lower = (0x08 << 4),
	.aclk_reg = SFAB_MSS_Q6_SW_ACLK_CTL,
	.jtag_clk_reg = MSS_Q6SW_JTAG_CLK_CTL,
	.name = "q6_modem_sw",
};

static void __iomem *mss_enable_reg;
static void __iomem *msm_riva_base;
static unsigned long riva_start;

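/*
 * In untrusted (non-secure) mode the init_image callbacks only record the
 * entry point from the image's ELF header, so the reset handlers below can
 * program it as the boot address.
 */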
static int init_image_lpass_q6_untrusted(const u8 *metadata, size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	q6_lpass.start_addr = ehdr->e_entry;
	return 0;
}

static int init_image_modem_fw_q6_untrusted(const u8 *metadata, size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	q6_modem_fw.start_addr = ehdr->e_entry;
	return 0;
}

static int init_image_modem_sw_q6_untrusted(const u8 *metadata, size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	q6_modem_sw.start_addr = ehdr->e_entry;
	return 0;
}

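/* No signature checking is done for untrusted (non-secure) images. */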
static int verify_blob(u32 phy_addr, size_t size)
{
	return 0;
}

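/*
 * Power up a Q6 core and release it from reset: enable its supply and
 * clocks, program the boot address and strap registers, turn the core
 * memories back on, and finally deassert the resets so execution begins
 * at the address taken from the ELF header.
 */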
static int reset_q6_untrusted(struct q6_data *q6)
{
	u32 reg;
	int err = 0;

	err = regulator_set_voltage(q6->vreg, 1050000, 1050000);
	if (err) {
		pr_err("Failed to set %s regulator's voltage.\n", q6->name);
		goto out;
	}
	err = regulator_enable(q6->vreg);
	if (err) {
		pr_err("Failed to enable %s's regulator.\n", q6->name);
		goto out;
	}
	q6->vreg_enabled = true;

	/* Enable Q6 ACLK */
	writel_relaxed(0x10, q6->aclk_reg);

	if (q6 == &q6_modem_fw || q6 == &q6_modem_sw) {
		/* Enable MSS clocks */
		writel_relaxed(0x10, SFAB_MSS_M_ACLK_CTL);
		writel_relaxed(0x10, SFAB_MSS_S_HCLK_CTL);
		writel_relaxed(0x10, MSS_S_HCLK_CTL);
		writel_relaxed(0x10, MSS_SLP_CLK_CTL);
		/* Wait for clocks to enable */
		mb();
		udelay(10);

		/* Enable JTAG clocks */
		/* TODO: Remove if/when Q6 software enables them? */
		writel_relaxed(0x10, q6->jtag_clk_reg);

		/* De-assert MSS reset */
		writel_relaxed(0x0, MSS_RESET);
		mb();
		udelay(10);

		/* Enable MSS */
		writel_relaxed(0x7, mss_enable_reg);
	}

	/*
	 * Assert AXIS_ACLK_EN override to allow for correct updating of the
	 * QDSP6_CORE_STATE status bit. This is mandatory only for the SW Q6
	 * in 8960v1 and optional elsewhere.
	 */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_CGC_OVERRIDE);
	reg |= Q6SS_AXIS_ACLK_EN;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_CGC_OVERRIDE);

	/* Deassert Q6SS_SS_ARES */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_RESET);
	reg &= ~(Q6SS_SS_ARES);
	writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);

	/* Program boot address */
	writel_relaxed((q6->start_addr >> 8) & 0xFFFFFF,
			q6->reg_base + QDSP6SS_RST_EVB);

	/* Program TCM and AHB address ranges */
	writel_relaxed(q6->strap_tcm_base, q6->reg_base + QDSP6SS_STRAP_TCM);
	writel_relaxed(q6->strap_ahb_upper | q6->strap_ahb_lower,
			q6->reg_base + QDSP6SS_STRAP_AHB);

	/* Turn off Q6 core clock */
	writel_relaxed(Q6SS_SRC_SWITCH_CLK_OVR,
			q6->reg_base + QDSP6SS_GFMUX_CTL);

	/* Put memories to sleep */
	writel_relaxed(Q6SS_CLAMP_IO, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Assert resets */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_RESET);
	reg |= (Q6SS_CORE_ARES | Q6SS_ISDB_ARES | Q6SS_ETM_ARES
		| Q6SS_STOP_CORE_ARES);
	writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);

	/* Wait 8 AHB cycles for Q6 to be fully reset (AHB = 1.5 MHz) */
	mb();
	usleep_range(20, 30);

	/* Turn on Q6 memories */
	reg = Q6SS_L2DATA_SLP_NRET_N | Q6SS_SLP_RET_N | Q6SS_L1TCM_SLP_NRET_N
	    | Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLEEP_NRET_N | Q6SS_ARR_STBY_N
	    | Q6SS_CLAMP_IO;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Turn on Q6 core clock */
	reg = Q6SS_CLK_ENA | Q6SS_SRC_SWITCH_CLK_OVR;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_GFMUX_CTL);

	/* Remove Q6SS_CLAMP_IO */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_PWR_CTL);
	reg &= ~Q6SS_CLAMP_IO;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Bring Q6 core out of reset and start execution. */
	writel_relaxed(0x0, q6->reg_base + QDSP6SS_RESET);

	/*
	 * Re-enable auto-gating of AXIS_ACLK at least one AXI clock cycle
	 * after resets are de-asserted.
	 */
	mb();
	usleep_range(1, 10);
	reg = readl_relaxed(q6->reg_base + QDSP6SS_CGC_OVERRIDE);
	reg &= ~Q6SS_AXIS_ACLK_EN;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_CGC_OVERRIDE);

out:
	return err;
}

static int reset_lpass_q6_untrusted(void)
{
	return reset_q6_untrusted(&q6_lpass);
}

static int reset_modem_fw_q6_untrusted(void)
{
	return reset_q6_untrusted(&q6_modem_fw);
}

static int reset_modem_sw_q6_untrusted(void)
{
	return reset_q6_untrusted(&q6_modem_sw);
}

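/*
 * Reverse of reset_q6_untrusted(): stop the core clock, assert the resets,
 * power down the memories, and drop the supply regulator.
 */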
static int shutdown_q6_untrusted(struct q6_data *q6)
{
	u32 reg;

	/* Turn off Q6 core clock */
	writel_relaxed(Q6SS_SRC_SWITCH_CLK_OVR,
			q6->reg_base + QDSP6SS_GFMUX_CTL);

	/* Assert resets */
	reg = (Q6SS_SS_ARES | Q6SS_CORE_ARES | Q6SS_ISDB_ARES
	     | Q6SS_ETM_ARES | Q6SS_STOP_CORE_ARES | Q6SS_PRIV_ARES);
	writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);

	/* Turn off Q6 memories */
	writel_relaxed(Q6SS_CLAMP_IO, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Put the modem subsystem back into reset when shutting down the FW Q6 */
	if (q6 == &q6_modem_fw)
		writel_relaxed(0x1, MSS_RESET);

	if (q6->vreg_enabled) {
		regulator_disable(q6->vreg);
		q6->vreg_enabled = false;
	}

	return 0;
}

static int shutdown_lpass_q6_untrusted(void)
{
	return shutdown_q6_untrusted(&q6_lpass);
}

static int shutdown_modem_fw_q6_untrusted(void)
{
	return shutdown_q6_untrusted(&q6_modem_fw);
}

static int shutdown_modem_sw_q6_untrusted(void)
{
	return shutdown_q6_untrusted(&q6_modem_sw);
}

static int init_image_riva_untrusted(const u8 *metadata, size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	riva_start = ehdr->e_entry;
	return 0;
}

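/*
 * Bring the RIVA (WCNSS) cCPU out of reset: enable the A2XB bridge,
 * program PLL 13 for 960 MHz from the selected XO, switch the cCPU root
 * clock to 240 MHz, point the boot remap at the image entry point, and
 * release the cCPU reset.
 */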
static int reset_riva_untrusted(void)
{
	u32 reg;
	bool xo;

	/* Enable A2XB bridge */
	reg = readl(RIVA_PMU_A2XB_CFG);
	reg |= RIVA_PMU_A2XB_CFG_EN;
	writel(reg, RIVA_PMU_A2XB_CFG);

	/* Determine which XO to use */
	reg = readl(RIVA_PMU_CFG);
	xo = (reg & RIVA_PMU_CFG_IRIS_XO_MODE) == RIVA_PMU_CFG_IRIS_XO_MODE_48;

	/* Program PLL 13 to 960 MHz */
	reg = readl(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_BYPASSNL | PLL_MODE_OUTCTRL | PLL_MODE_RESET_N);
	writel(reg, RIVA_PLL_MODE);

	if (xo)
		writel(0x40000C00 | 40, RIVA_PLL_L_VAL);
	else
		writel(0x40000C00 | 50, RIVA_PLL_L_VAL);
	writel(0, RIVA_PLL_M_VAL);
	writel(1, RIVA_PLL_N_VAL);
	writel_relaxed(0x01495227, RIVA_PLL_CONFIG);

	reg = readl(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_REF_XO_SEL);
	reg |= xo ? PLL_MODE_REF_XO_SEL_RF : PLL_MODE_REF_XO_SEL_CXO;
	writel(reg, RIVA_PLL_MODE);

	/* Enable PLL 13 */
	reg |= PLL_MODE_BYPASSNL;
	writel(reg, RIVA_PLL_MODE);

	usleep_range(10, 20);

	reg |= PLL_MODE_RESET_N;
	writel(reg, RIVA_PLL_MODE);
	reg |= PLL_MODE_OUTCTRL;
	writel(reg, RIVA_PLL_MODE);

	/* Wait for PLL to settle */
	usleep_range(50, 100);

	/* Configure cCPU for 240 MHz */
	reg = readl(RIVA_PMU_CLK_ROOT3);
	if (readl(RIVA_PMU_ROOT_CLK_SEL) & RIVA_PMU_ROOT_CLK_SEL_3) {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC0_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC0_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC0_DIV_2;
	} else {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC1_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC1_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC1_DIV_2;
	}
	writel(reg, RIVA_PMU_CLK_ROOT3);
	reg |= RIVA_PMU_CLK_ROOT3_ENA;
	writel(reg, RIVA_PMU_CLK_ROOT3);
	reg = readl(RIVA_PMU_ROOT_CLK_SEL);
	reg ^= RIVA_PMU_ROOT_CLK_SEL_3;
	writel(reg, RIVA_PMU_ROOT_CLK_SEL);

	/* Use the high vector table */
	reg = readl(RIVA_PMU_CCPU_CTL);
	reg |= RIVA_PMU_CCPU_CTL_HIGH_IVT | RIVA_PMU_CCPU_CTL_REMAP_EN;
	writel(reg, RIVA_PMU_CCPU_CTL);

	/* Set base memory address */
	writel_relaxed(riva_start >> 16, RIVA_PMU_CCPU_BOOT_REMAP_ADDR);

	/* Clear the warm-boot bit to indicate that this is a cold boot */
	reg = readl(RIVA_PMU_CFG);
	reg &= ~(RIVA_PMU_CFG_WARM_BOOT);
	writel(reg, RIVA_PMU_CFG);

	/* Enable the cCPU clock */
	reg = readl(RIVA_PMU_OVRD_VAL);
	reg |= RIVA_PMU_OVRD_VAL_CCPU_CLK;
	writel(reg, RIVA_PMU_OVRD_VAL);

	/* Take the cCPU out of reset */
	reg |= RIVA_PMU_OVRD_VAL_CCPU_RESET;
	writel(reg, RIVA_PMU_OVRD_VAL);

	return 0;
}

static int shutdown_riva_untrusted(void)
{
	u32 reg;

	/* Put RIVA into reset */
	reg = readl(RIVA_PMU_OVRD_VAL);
	reg &= ~(RIVA_PMU_OVRD_VAL_CCPU_RESET | RIVA_PMU_OVRD_VAL_CCPU_CLK);
	writel(reg, RIVA_PMU_OVRD_VAL);
	return 0;
}

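/*
 * The DSPS sits behind the PPSS control registers; it only needs its
 * clocks enabled and its resets toggled, with no strap or boot-address
 * programming.
 */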
static int init_image_dsps_untrusted(const u8 *metadata, size_t size)
{
	/* Bring memory and bus interface out of reset */
	writel_relaxed(0x2, PPSS_RESET);
	writel_relaxed(0x10, PPSS_HCLK_CTL);
	return 0;
}

static int reset_dsps_untrusted(void)
{
	writel_relaxed(0x10, PPSS_PROC_CLK_CTL);
	/* Bring DSPS out of reset */
	writel_relaxed(0x0, PPSS_RESET);
	return 0;
}

static int shutdown_dsps_untrusted(void)
{
	writel_relaxed(0x2, PPSS_RESET);
	writel_relaxed(0x0, PPSS_PROC_CLK_CTL);
	return 0;
}

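/* Per-peripheral reset operations referenced by the PIL entries below. */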
static struct pil_reset_ops pil_modem_fw_q6_ops = {
	.init_image = init_image_modem_fw_q6_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_modem_fw_q6_untrusted,
	.shutdown = shutdown_modem_fw_q6_untrusted,
};

static struct pil_reset_ops pil_modem_sw_q6_ops = {
	.init_image = init_image_modem_sw_q6_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_modem_sw_q6_untrusted,
	.shutdown = shutdown_modem_sw_q6_untrusted,
};

static struct pil_reset_ops pil_lpass_q6_ops = {
	.init_image = init_image_lpass_q6_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_lpass_q6_untrusted,
	.shutdown = shutdown_lpass_q6_untrusted,
};

static struct pil_reset_ops pil_riva_ops = {
	.init_image = init_image_riva_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_riva_untrusted,
	.shutdown = shutdown_riva_untrusted,
};

static struct pil_reset_ops pil_dsps_ops = {
	.init_image = init_image_dsps_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_dsps_untrusted,
	.shutdown = shutdown_dsps_untrusted,
};

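/*
 * Peripheral descriptors registered with PIL. Per the depends_on fields,
 * the modem firmware Q6 depends on "q6" (the LPASS Q6 entry) and the
 * modem software Q6 depends on "modem_fw".
 */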
static struct pil_device peripherals[] = {
	{
		.name = "q6",
		.pdev = {
			.name = "pil_lpass_q6",
			.id = -1,
		},
		.ops = &pil_lpass_q6_ops,
	},
	{
		.name = "modem_fw",
		.depends_on = "q6",
		.pdev = {
			.name = "pil_modem_fw_q6",
			.id = -1,
		},
		.ops = &pil_modem_fw_q6_ops,
	},
	{
		.name = "modem",
		.depends_on = "modem_fw",
		.pdev = {
			.name = "pil_modem_sw_q6",
			.id = -1,
		},
		.ops = &pil_modem_sw_q6_ops,
	},
	{
		.name = "wcnss",
		.pdev = {
			.name = "pil_riva",
			.id = -1,
		},
		.ops = &pil_riva_ops,
	},
	{
		.name = "dsps",
		.pdev = {
			.name = "pil_dsps",
			.id = -1,
		},
		.ops = &pil_dsps_ops,
	},
};

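/*
 * Map the register regions, obtain the Q6 supply regulators, and register
 * each peripheral with the PIL framework. Does nothing on simulated targets.
 */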
static int __init msm_peripheral_reset_init(void)
{
	unsigned i;
	int err;

	/*
	 * Don't initialize PIL on simulated targets, as some
	 * subsystems may not be emulated on them.
	 */
	if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3())
		return 0;

	mss_enable_reg = ioremap(MSM_MSS_ENABLE_PHYS, 1);
	if (!mss_enable_reg) {
		err = -ENOMEM;
		goto err_map_mss;
	}

	q6_lpass.reg_base = ioremap(MSM_LPASS_QDSP6SS_PHYS, SZ_256);
	if (!q6_lpass.reg_base) {
		err = -ENOMEM;
		goto err_map_lpass_q6;
	}

	q6_modem_fw.reg_base = ioremap(MSM_FW_QDSP6SS_PHYS, SZ_256);
	if (!q6_modem_fw.reg_base) {
		err = -ENOMEM;
		goto err_map_modem_fw_q6;
	}

	q6_modem_sw.reg_base = ioremap(MSM_SW_QDSP6SS_PHYS, SZ_256);
	if (!q6_modem_sw.reg_base) {
		err = -ENOMEM;
		goto err_map_modem_sw_q6;
	}

	msm_riva_base = ioremap(MSM_RIVA_PHYS, SZ_256);
	if (!msm_riva_base) {
		err = -ENOMEM;
		goto err_map_riva;
	}

	q6_lpass.vreg = regulator_get(NULL, "lpass_q6");
	if (IS_ERR(q6_lpass.vreg)) {
		err = PTR_ERR(q6_lpass.vreg);
		goto err_vreg_lpass;
	}

	q6_modem_fw.vreg = regulator_get(NULL, "modem_fw_q6");
	if (IS_ERR(q6_modem_fw.vreg)) {
		err = PTR_ERR(q6_modem_fw.vreg);
		goto err_vreg_modem_fw_q6;
	}

	q6_modem_sw.vreg = regulator_get(NULL, "modem_sw_q6");
	if (IS_ERR(q6_modem_sw.vreg)) {
		err = PTR_ERR(q6_modem_sw.vreg);
		goto err_vreg_modem_sw_q6;
	}

	for (i = 0; i < ARRAY_SIZE(peripherals); i++)
		msm_pil_add_device(&peripherals[i]);

	return 0;

err_vreg_modem_sw_q6:
	regulator_put(q6_modem_fw.vreg);
err_vreg_modem_fw_q6:
	regulator_put(q6_lpass.vreg);
err_vreg_lpass:
	iounmap(msm_riva_base);
err_map_riva:
	iounmap(q6_modem_sw.reg_base);
err_map_modem_sw_q6:
	iounmap(q6_modem_fw.reg_base);
err_map_modem_fw_q6:
	iounmap(q6_lpass.reg_base);
err_map_lpass_q6:
	iounmap(mss_enable_reg);
err_map_mss:
	return err;
}
arch_initcall(msm_peripheral_reset_init);