/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/elf.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <mach/msm_iomap.h>
#include <asm/mach-types.h>

#include "peripheral-loader.h"

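/*
 * Physical bases of the QDSP6SS register blocks for each Q6 core and of
 * the modem subsystem (MSS) enable register.
 */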
#define MSM_FW_QDSP6SS_PHYS	0x08800000
#define MSM_SW_QDSP6SS_PHYS	0x08900000
#define MSM_LPASS_QDSP6SS_PHYS	0x28800000
#define MSM_MSS_ENABLE_PHYS	0x08B00000

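/* QDSP6SS register offsets, relative to each core's ioremapped register base. */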
#define QDSP6SS_RST_EVB		0x0
#define QDSP6SS_RESET		0x04
#define QDSP6SS_CGC_OVERRIDE	0x18
#define QDSP6SS_STRAP_TCM	0x1C
#define QDSP6SS_STRAP_AHB	0x20
#define QDSP6SS_GFMUX_CTL	0x30
#define QDSP6SS_PWR_CTL		0x38

#define MSS_S_HCLK_CTL		(MSM_CLK_CTL_BASE + 0x2C70)
#define MSS_SLP_CLK_CTL		(MSM_CLK_CTL_BASE + 0x2C60)
#define SFAB_MSS_M_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2340)
#define SFAB_MSS_S_HCLK_CTL	(MSM_CLK_CTL_BASE + 0x2C00)
#define SFAB_MSS_Q6_FW_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2044)
#define SFAB_MSS_Q6_SW_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2040)
#define SFAB_LPASS_Q6_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x23A0)
#define MSS_Q6FW_JTAG_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2C6C)
#define MSS_Q6SW_JTAG_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2C68)
#define MSS_RESET		(MSM_CLK_CTL_BASE + 0x2C64)

#define Q6SS_SS_ARES		BIT(0)
#define Q6SS_CORE_ARES		BIT(1)
#define Q6SS_ISDB_ARES		BIT(2)
#define Q6SS_ETM_ARES		BIT(3)
#define Q6SS_STOP_CORE_ARES	BIT(4)
#define Q6SS_PRIV_ARES		BIT(5)

#define Q6SS_L2DATA_SLP_NRET_N	BIT(0)
#define Q6SS_SLP_RET_N		BIT(1)
#define Q6SS_L1TCM_SLP_NRET_N	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N	BIT(3)
#define Q6SS_ETB_SLEEP_NRET_N	BIT(4)
#define Q6SS_ARR_STBY_N		BIT(5)
#define Q6SS_CLAMP_IO		BIT(6)

#define Q6SS_CLK_ENA		BIT(1)
#define Q6SS_SRC_SWITCH_CLK_OVR	BIT(8)
#define Q6SS_AXIS_ACLK_EN	BIT(9)

#define MSM_RIVA_PHYS			0x03204000
#define RIVA_PMU_A2XB_CFG		(msm_riva_base + 0xB8)
#define RIVA_PMU_A2XB_CFG_EN		BIT(0)

#define RIVA_PMU_CFG			(msm_riva_base + 0x28)
#define RIVA_PMU_CFG_WARM_BOOT		BIT(0)
#define RIVA_PMU_CFG_IRIS_XO_MODE	0x6
#define RIVA_PMU_CFG_IRIS_XO_MODE_48	(3 << 1)

#define RIVA_PMU_OVRD_VAL		(msm_riva_base + 0x30)
#define RIVA_PMU_OVRD_VAL_CCPU_RESET	BIT(0)
#define RIVA_PMU_OVRD_VAL_CCPU_CLK	BIT(1)

#define RIVA_PMU_CCPU_CTL		(msm_riva_base + 0x9C)
#define RIVA_PMU_CCPU_CTL_HIGH_IVT	BIT(0)
#define RIVA_PMU_CCPU_CTL_REMAP_EN	BIT(2)

#define RIVA_PMU_CCPU_BOOT_REMAP_ADDR	(msm_riva_base + 0xA0)

#define RIVA_PLL_MODE			(MSM_CLK_CTL_BASE + 0x31A0)
#define PLL_MODE_OUTCTRL		BIT(0)
#define PLL_MODE_BYPASSNL		BIT(1)
#define PLL_MODE_RESET_N		BIT(2)
#define PLL_MODE_REF_XO_SEL		0x30
#define PLL_MODE_REF_XO_SEL_CXO		(2 << 4)
#define PLL_MODE_REF_XO_SEL_RF		(3 << 4)
#define RIVA_PLL_L_VAL			(MSM_CLK_CTL_BASE + 0x31A4)
#define RIVA_PLL_M_VAL			(MSM_CLK_CTL_BASE + 0x31A8)
#define RIVA_PLL_N_VAL			(MSM_CLK_CTL_BASE + 0x31AC)
#define RIVA_PLL_CONFIG			(MSM_CLK_CTL_BASE + 0x31B4)
#define RIVA_PLL_STATUS			(MSM_CLK_CTL_BASE + 0x31B8)

#define RIVA_PMU_ROOT_CLK_SEL		(msm_riva_base + 0xC8)
#define RIVA_PMU_ROOT_CLK_SEL_3		BIT(2)

#define RIVA_PMU_CLK_ROOT3		(msm_riva_base + 0x78)
#define RIVA_PMU_CLK_ROOT3_ENA		BIT(0)
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV	0x3C
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV_2	(1 << 2)
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL	0x1C0
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA	(1 << 6)
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV	0x1E00
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV_2	(1 << 9)
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL	0xE000
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA	(1 << 13)

#define PPSS_RESET		(MSM_CLK_CTL_BASE + 0x2594)
#define PPSS_PROC_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2588)
#define PPSS_HCLK_CTL		(MSM_CLK_CTL_BASE + 0x2580)

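/* Per-core Q6 state: strap values, register mappings, supply regulator and boot address. */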
struct q6_data {
	const unsigned strap_tcm_base;
	const unsigned strap_ahb_upper;
	const unsigned strap_ahb_lower;
	const unsigned reg_base_phys;
	void __iomem *reg_base;
	void __iomem *aclk_reg;
	void __iomem *jtag_clk_reg;
	int start_addr;
	struct regulator *vreg;
	bool vreg_enabled;
	const char *name;
};

static struct q6_data q6_lpass = {
	.strap_tcm_base = (0x146 << 16),
	.strap_ahb_upper = (0x029 << 16),
	.strap_ahb_lower = (0x028 << 4),
	.reg_base_phys = MSM_LPASS_QDSP6SS_PHYS,
	.aclk_reg = SFAB_LPASS_Q6_ACLK_CTL,
	.name = "q6_lpass",
};

static struct q6_data q6_modem_fw = {
	.strap_tcm_base = (0x40 << 16),
	.strap_ahb_upper = (0x09 << 16),
	.strap_ahb_lower = (0x08 << 4),
	.reg_base_phys = MSM_FW_QDSP6SS_PHYS,
	.aclk_reg = SFAB_MSS_Q6_FW_ACLK_CTL,
	.jtag_clk_reg = MSS_Q6FW_JTAG_CLK_CTL,
	.name = "q6_modem_fw",
};

static struct q6_data q6_modem_sw = {
	.strap_tcm_base = (0x42 << 16),
	.strap_ahb_upper = (0x09 << 16),
	.strap_ahb_lower = (0x08 << 4),
	.reg_base_phys = MSM_SW_QDSP6SS_PHYS,
	.aclk_reg = SFAB_MSS_Q6_SW_ACLK_CTL,
	.jtag_clk_reg = MSS_Q6SW_JTAG_CLK_CTL,
	.name = "q6_modem_sw",
};

static void __iomem *mss_enable_reg;
static void __iomem *msm_riva_base;
static unsigned long riva_start;

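/*
 * init_image callbacks: record each image's ELF entry point so the reset
 * handlers know where the core should begin execution.
 */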
static int init_image_lpass_q6_untrusted(const u8 *metadata, size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	q6_lpass.start_addr = ehdr->e_entry;
	return 0;
}

static int init_image_modem_fw_q6_untrusted(const u8 *metadata, size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	q6_modem_fw.start_addr = ehdr->e_entry;
	return 0;
}

static int init_image_modem_sw_q6_untrusted(const u8 *metadata, size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	q6_modem_sw.start_addr = ehdr->e_entry;
	return 0;
}

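/* No image authentication is performed in untrusted (non-secure) mode. */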
static int verify_blob(u32 phy_addr, size_t size)
{
	return 0;
}

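/*
 * Non-secure Q6 boot sequence: power the core's rail, enable bus clocks,
 * program the boot address and strap registers, then walk the core out of
 * reset with its memories powered and I/O clamps removed.
 */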
static int reset_q6_untrusted(struct q6_data *q6)
{
	u32 reg;
	int err = 0;

	err = regulator_set_voltage(q6->vreg, 1050000, 1050000);
	if (err) {
		pr_err("Failed to set %s regulator's voltage.\n", q6->name);
		goto out;
	}
	err = regulator_enable(q6->vreg);
	if (err) {
		pr_err("Failed to enable %s's regulator.\n", q6->name);
		goto out;
	}
	q6->vreg_enabled = true;

	/* Enable Q6 ACLK */
	writel_relaxed(0x10, q6->aclk_reg);

	if (q6 == &q6_modem_fw || q6 == &q6_modem_sw) {
		/* Enable MSS clocks */
		writel_relaxed(0x10, SFAB_MSS_M_ACLK_CTL);
		writel_relaxed(0x10, SFAB_MSS_S_HCLK_CTL);
		writel_relaxed(0x10, MSS_S_HCLK_CTL);
		writel_relaxed(0x10, MSS_SLP_CLK_CTL);
		/* Wait for clocks to enable */
		mb();
		udelay(10);

		/* Enable JTAG clocks */
		/* TODO: Remove if/when Q6 software enables them? */
		writel_relaxed(0x10, q6->jtag_clk_reg);

		/* De-assert MSS reset */
		writel_relaxed(0x0, MSS_RESET);
		mb();
		udelay(10);

		/* Enable MSS */
		writel_relaxed(0x7, mss_enable_reg);
	}

	/*
	 * Assert AXIS_ACLK_EN override to allow for correct updating of the
	 * QDSP6_CORE_STATE status bit. This is mandatory only for the SW Q6
	 * in 8960v1 and optional elsewhere.
	 */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_CGC_OVERRIDE);
	reg |= Q6SS_AXIS_ACLK_EN;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_CGC_OVERRIDE);

	/* Deassert Q6SS_SS_ARES */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_RESET);
	reg &= ~(Q6SS_SS_ARES);
	writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);

	/* Program boot address */
	writel_relaxed((q6->start_addr >> 8) & 0xFFFFFF,
			q6->reg_base + QDSP6SS_RST_EVB);

	/* Program TCM and AHB address ranges */
	writel_relaxed(q6->strap_tcm_base, q6->reg_base + QDSP6SS_STRAP_TCM);
	writel_relaxed(q6->strap_ahb_upper | q6->strap_ahb_lower,
			q6->reg_base + QDSP6SS_STRAP_AHB);

	/* Turn off Q6 core clock */
	writel_relaxed(Q6SS_SRC_SWITCH_CLK_OVR,
			q6->reg_base + QDSP6SS_GFMUX_CTL);

	/* Put memories to sleep */
	writel_relaxed(Q6SS_CLAMP_IO, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Assert resets */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_RESET);
	reg |= (Q6SS_CORE_ARES | Q6SS_ISDB_ARES | Q6SS_ETM_ARES
		| Q6SS_STOP_CORE_ARES);
	writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);

	/* Wait 8 AHB cycles for Q6 to be fully reset (AHB = 1.5 MHz) */
	mb();
	usleep_range(20, 30);

	/* Turn on Q6 memories */
	reg = Q6SS_L2DATA_SLP_NRET_N | Q6SS_SLP_RET_N | Q6SS_L1TCM_SLP_NRET_N
	    | Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLEEP_NRET_N | Q6SS_ARR_STBY_N
	    | Q6SS_CLAMP_IO;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Turn on Q6 core clock */
	reg = Q6SS_CLK_ENA | Q6SS_SRC_SWITCH_CLK_OVR;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_GFMUX_CTL);

	/* Remove Q6SS_CLAMP_IO */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_PWR_CTL);
	reg &= ~Q6SS_CLAMP_IO;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Bring Q6 core out of reset and start execution. */
	writel_relaxed(0x0, q6->reg_base + QDSP6SS_RESET);

	/*
	 * Re-enable auto-gating of AXIS_ACLK at least one AXI clock cycle
	 * after resets are de-asserted.
	 */
	mb();
	usleep_range(1, 10);
	reg = readl_relaxed(q6->reg_base + QDSP6SS_CGC_OVERRIDE);
	reg &= ~Q6SS_AXIS_ACLK_EN;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_CGC_OVERRIDE);

out:
	return err;
}

static int reset_lpass_q6_untrusted(void)
{
	return reset_q6_untrusted(&q6_lpass);
}

static int reset_modem_fw_q6_untrusted(void)
{
	return reset_q6_untrusted(&q6_modem_fw);
}

static int reset_modem_sw_q6_untrusted(void)
{
	return reset_q6_untrusted(&q6_modem_sw);
}

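/* Halt a Q6 core: stop its clock, assert all resets and power down its memories. */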
static int shutdown_q6_untrusted(struct q6_data *q6)
{
	u32 reg;

	/* Turn off Q6 core clock */
	writel_relaxed(Q6SS_SRC_SWITCH_CLK_OVR,
			q6->reg_base + QDSP6SS_GFMUX_CTL);

	/* Assert resets */
	reg = (Q6SS_SS_ARES | Q6SS_CORE_ARES | Q6SS_ISDB_ARES
	     | Q6SS_ETM_ARES | Q6SS_STOP_CORE_ARES | Q6SS_PRIV_ARES);
	writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);

	/* Turn off Q6 memories */
	writel_relaxed(Q6SS_CLAMP_IO, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Put Modem Subsystem back into reset when shutting down FWQ6 */
	if (q6 == &q6_modem_fw)
		writel_relaxed(0x1, MSS_RESET);

	if (q6->vreg_enabled) {
		regulator_disable(q6->vreg);
		q6->vreg_enabled = false;
	}

	return 0;
}

static int shutdown_lpass_q6_untrusted(void)
{
	return shutdown_q6_untrusted(&q6_lpass);
}

static int shutdown_modem_fw_q6_untrusted(void)
{
	return shutdown_q6_untrusted(&q6_modem_fw);
}

static int shutdown_modem_sw_q6_untrusted(void)
{
	return shutdown_q6_untrusted(&q6_modem_sw);
}

static int init_image_riva_untrusted(const u8 *metadata, size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	riva_start = ehdr->e_entry;
	return 0;
}

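/*
 * RIVA (WCNSS) bring-up: enable the A2XB bridge, program PLL 13 and the cCPU
 * clock root for 240 MHz, point the boot remap at the loaded image and then
 * release the cCPU from reset.
 */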
static int reset_riva_untrusted(void)
{
	u32 reg;
	bool xo;

	/* Enable A2XB bridge */
	reg = readl(RIVA_PMU_A2XB_CFG);
	reg |= RIVA_PMU_A2XB_CFG_EN;
	writel(reg, RIVA_PMU_A2XB_CFG);

	/* Determine which XO to use */
	reg = readl(RIVA_PMU_CFG);
	xo = (reg & RIVA_PMU_CFG_IRIS_XO_MODE) == RIVA_PMU_CFG_IRIS_XO_MODE_48;

	/* Program PLL 13 to 960 MHz */
	reg = readl(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_BYPASSNL | PLL_MODE_OUTCTRL | PLL_MODE_RESET_N);
	writel(reg, RIVA_PLL_MODE);

	if (xo)
		writel(0x40000C00 | 40, RIVA_PLL_L_VAL);
	else
		writel(0x40000C00 | 50, RIVA_PLL_L_VAL);
	writel(0, RIVA_PLL_M_VAL);
	writel(1, RIVA_PLL_N_VAL);
	writel_relaxed(0x01495227, RIVA_PLL_CONFIG);

	reg = readl(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_REF_XO_SEL);
	reg |= xo ? PLL_MODE_REF_XO_SEL_RF : PLL_MODE_REF_XO_SEL_CXO;
	writel(reg, RIVA_PLL_MODE);

	/* Enable PLL 13 */
	reg |= PLL_MODE_BYPASSNL;
	writel(reg, RIVA_PLL_MODE);

	usleep_range(10, 20);

	reg |= PLL_MODE_RESET_N;
	writel(reg, RIVA_PLL_MODE);
	reg |= PLL_MODE_OUTCTRL;
	writel(reg, RIVA_PLL_MODE);

	/* Wait for PLL to settle */
	usleep_range(50, 100);

	/* Configure cCPU for 240 MHz */
	reg = readl(RIVA_PMU_CLK_ROOT3);
	if (readl(RIVA_PMU_ROOT_CLK_SEL) & RIVA_PMU_ROOT_CLK_SEL_3) {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC0_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC0_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC0_DIV_2;
	} else {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC1_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC1_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC1_DIV_2;
	}
	writel(reg, RIVA_PMU_CLK_ROOT3);
	reg |= RIVA_PMU_CLK_ROOT3_ENA;
	writel(reg, RIVA_PMU_CLK_ROOT3);
	reg = readl(RIVA_PMU_ROOT_CLK_SEL);
	reg ^= RIVA_PMU_ROOT_CLK_SEL_3;
	writel(reg, RIVA_PMU_ROOT_CLK_SEL);

	/* Use the high vector table */
	reg = readl(RIVA_PMU_CCPU_CTL);
	reg |= RIVA_PMU_CCPU_CTL_HIGH_IVT | RIVA_PMU_CCPU_CTL_REMAP_EN;
	writel(reg, RIVA_PMU_CCPU_CTL);

	/* Set base memory address */
	writel_relaxed(riva_start >> 16, RIVA_PMU_CCPU_BOOT_REMAP_ADDR);

	/* Clear warmboot bit indicating this is a cold boot */
	reg = readl(RIVA_PMU_CFG);
	reg &= ~(RIVA_PMU_CFG_WARM_BOOT);
	writel(reg, RIVA_PMU_CFG);

	/* Enable the cCPU clock */
	reg = readl(RIVA_PMU_OVRD_VAL);
	reg |= RIVA_PMU_OVRD_VAL_CCPU_CLK;
	writel(reg, RIVA_PMU_OVRD_VAL);

	/* Take cCPU out of reset */
	reg |= RIVA_PMU_OVRD_VAL_CCPU_RESET;
	writel(reg, RIVA_PMU_OVRD_VAL);

	return 0;
}

static int shutdown_riva_untrusted(void)
{
	u32 reg;
	/* Put riva into reset */
	reg = readl(RIVA_PMU_OVRD_VAL);
	reg &= ~(RIVA_PMU_OVRD_VAL_CCPU_RESET | RIVA_PMU_OVRD_VAL_CCPU_CLK);
	writel(reg, RIVA_PMU_OVRD_VAL);
	return 0;
}

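/* DSPS (PPSS): bring-up and shutdown are handled entirely through reset and clock control writes. */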
static int init_image_dsps_untrusted(const u8 *metadata, size_t size)
{
	/* Bring memory and bus interface out of reset */
	writel_relaxed(0x2, PPSS_RESET);
	writel_relaxed(0x10, PPSS_HCLK_CTL);
	return 0;
}

static int reset_dsps_untrusted(void)
{
	writel_relaxed(0x10, PPSS_PROC_CLK_CTL);
	/* Bring DSPS out of reset */
	writel_relaxed(0x0, PPSS_RESET);
	return 0;
}

static int shutdown_dsps_untrusted(void)
{
	writel_relaxed(0x2, PPSS_RESET);
	writel_relaxed(0x0, PPSS_PROC_CLK_CTL);
	return 0;
}

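/* PIL callbacks for each peripheral image. */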
static struct pil_reset_ops pil_modem_fw_q6_ops = {
	.init_image = init_image_modem_fw_q6_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_modem_fw_q6_untrusted,
	.shutdown = shutdown_modem_fw_q6_untrusted,
};

static struct pil_reset_ops pil_modem_sw_q6_ops = {
	.init_image = init_image_modem_sw_q6_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_modem_sw_q6_untrusted,
	.shutdown = shutdown_modem_sw_q6_untrusted,
};

static struct pil_reset_ops pil_lpass_q6_ops = {
	.init_image = init_image_lpass_q6_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_lpass_q6_untrusted,
	.shutdown = shutdown_lpass_q6_untrusted,
};

static struct pil_reset_ops pil_riva_ops = {
	.init_image = init_image_riva_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_riva_untrusted,
	.shutdown = shutdown_riva_untrusted,
};

struct pil_reset_ops pil_dsps_ops = {
	.init_image = init_image_dsps_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_dsps_untrusted,
	.shutdown = shutdown_dsps_untrusted,
};

static struct pil_device pil_lpass_q6 = {
	.name = "q6",
	.pdev = {
		.name = "pil_lpass_q6",
		.id = -1,
	},
	.ops = &pil_lpass_q6_ops,
};

static struct pil_device pil_modem_fw_q6 = {
	.name = "modem_fw",
	.depends_on = "q6",
	.pdev = {
		.name = "pil_modem_fw_q6",
		.id = -1,
	},
	.ops = &pil_modem_fw_q6_ops,
};

static struct pil_device pil_modem_sw_q6 = {
	.name = "modem",
	.depends_on = "modem_fw",
	.pdev = {
		.name = "pil_modem_sw_q6",
		.id = -1,
	},
	.ops = &pil_modem_sw_q6_ops,
};

static struct pil_device pil_riva = {
	.name = "wcnss",
	.pdev = {
		.name = "pil_riva",
		.id = -1,
	},
	.ops = &pil_riva_ops,
};

static struct pil_device pil_dsps = {
	.name = "dsps",
	.pdev = {
		.name = "pil_dsps",
		.id = -1,
	},
	.ops = &pil_dsps_ops,
};

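/* Map a Q6 core's register block and look up its supply regulator. */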
static int __init q6_reset_init(struct q6_data *q6)
{
	int err;

	q6->reg_base = ioremap(q6->reg_base_phys, SZ_256);
	if (!q6->reg_base) {
		err = -ENOMEM;
		goto err_map;
	}

	q6->vreg = regulator_get(NULL, q6->name);
	if (IS_ERR(q6->vreg)) {
		err = PTR_ERR(q6->vreg);
		goto err_vreg;
	}

	return 0;

err_vreg:
	iounmap(q6->reg_base);
err_map:
	return err;
}

static int __init msm_peripheral_reset_init(void)
{
	int err;

	/*
	 * Don't initialize PIL on simulated targets, as some
	 * subsystems may not be emulated on them.
	 */
	if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3())
		return 0;

	err = q6_reset_init(&q6_lpass);
	if (err)
		return err;
	msm_pil_add_device(&pil_lpass_q6);

	mss_enable_reg = ioremap(MSM_MSS_ENABLE_PHYS, 4);
	if (!mss_enable_reg)
		return -ENOMEM;

	err = q6_reset_init(&q6_modem_fw);
	if (err) {
		iounmap(mss_enable_reg);
		return err;
	}
	msm_pil_add_device(&pil_modem_fw_q6);

	err = q6_reset_init(&q6_modem_sw);
	if (err)
		return err;
	msm_pil_add_device(&pil_modem_sw_q6);

	msm_pil_add_device(&pil_dsps);

	msm_riva_base = ioremap(MSM_RIVA_PHYS, SZ_256);
	if (!msm_riva_base)
		return -ENOMEM;
	msm_pil_add_device(&pil_riva);

	return 0;
}
arch_initcall(msm_peripheral_reset_init);