blob: 30e0da27aef34046765298bf15bafcbf7b452ee9 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/err.h>
15#include <linux/io.h>
16#include <linux/elf.h>
17#include <linux/delay.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/regulator/consumer.h>
Stephen Boyd9802ca92011-05-25 15:09:59 -070021
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022#include <asm/mach-types.h>
23
Stephen Boyd9802ca92011-05-25 15:09:59 -070024#include <mach/msm_iomap.h>
25#include <mach/scm.h>
26
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027#include "peripheral-loader.h"
28
/*
 * Physical base addresses of the three QDSP6SS ("Q6") register banks
 * (modem FW, modem SW, LPASS) and the modem subsystem enable register.
 */
#define MSM_FW_QDSP6SS_PHYS	0x08800000
#define MSM_SW_QDSP6SS_PHYS	0x08900000
#define MSM_LPASS_QDSP6SS_PHYS	0x28800000
#define MSM_MSS_ENABLE_PHYS	0x08B00000

/* Register offsets within a QDSP6SS bank (added to q6_data.reg_base). */
#define QDSP6SS_RST_EVB		0x0
#define QDSP6SS_RESET		0x04
#define QDSP6SS_CGC_OVERRIDE	0x18
#define QDSP6SS_STRAP_TCM	0x1C
#define QDSP6SS_STRAP_AHB	0x20
#define QDSP6SS_GFMUX_CTL	0x30
#define QDSP6SS_PWR_CTL		0x38

/* Clock and reset control registers in the global clock controller. */
#define MSS_S_HCLK_CTL		(MSM_CLK_CTL_BASE + 0x2C70)
#define MSS_SLP_CLK_CTL		(MSM_CLK_CTL_BASE + 0x2C60)
#define SFAB_MSS_M_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2340)
#define SFAB_MSS_S_HCLK_CTL	(MSM_CLK_CTL_BASE + 0x2C00)
#define SFAB_MSS_Q6_FW_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2044)
#define SFAB_MSS_Q6_SW_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2040)
#define SFAB_LPASS_Q6_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x23A0)
#define MSS_Q6FW_JTAG_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2C6C)
#define MSS_Q6SW_JTAG_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2C68)
#define MSS_RESET		(MSM_CLK_CTL_BASE + 0x2C64)

/* QDSP6SS_RESET bit fields. */
#define Q6SS_SS_ARES		BIT(0)
#define Q6SS_CORE_ARES		BIT(1)
#define Q6SS_ISDB_ARES		BIT(2)
#define Q6SS_ETM_ARES		BIT(3)
#define Q6SS_STOP_CORE_ARES	BIT(4)
#define Q6SS_PRIV_ARES		BIT(5)

/* QDSP6SS_PWR_CTL bit fields (memory retention and IO clamp controls). */
#define Q6SS_L2DATA_SLP_NRET_N	BIT(0)
#define Q6SS_SLP_RET_N		BIT(1)
#define Q6SS_L1TCM_SLP_NRET_N	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N	BIT(3)
#define Q6SS_ETB_SLEEP_NRET_N	BIT(4)
#define Q6SS_ARR_STBY_N		BIT(5)
#define Q6SS_CLAMP_IO		BIT(6)

/* QDSP6SS_GFMUX_CTL / QDSP6SS_CGC_OVERRIDE bit fields. */
#define Q6SS_CLK_ENA		BIT(1)
#define Q6SS_SRC_SWITCH_CLK_OVR	BIT(8)
#define Q6SS_AXIS_ACLK_EN	BIT(9)

/* RIVA (WCNSS) PMU registers, relative to the msm_riva_base mapping. */
#define MSM_RIVA_PHYS			0x03204000
#define RIVA_PMU_A2XB_CFG		(msm_riva_base + 0xB8)
#define RIVA_PMU_A2XB_CFG_EN		BIT(0)

#define RIVA_PMU_CFG			(msm_riva_base + 0x28)
#define RIVA_PMU_CFG_WARM_BOOT		BIT(0)
#define RIVA_PMU_CFG_IRIS_XO_MODE	0x6
#define RIVA_PMU_CFG_IRIS_XO_MODE_48	(3 << 1)

#define RIVA_PMU_OVRD_VAL		(msm_riva_base + 0x30)
#define RIVA_PMU_OVRD_VAL_CCPU_RESET	BIT(0)
#define RIVA_PMU_OVRD_VAL_CCPU_CLK	BIT(1)

#define RIVA_PMU_CCPU_CTL		(msm_riva_base + 0x9C)
#define RIVA_PMU_CCPU_CTL_HIGH_IVT	BIT(0)
#define RIVA_PMU_CCPU_CTL_REMAP_EN	BIT(2)

#define RIVA_PMU_CCPU_BOOT_REMAP_ADDR	(msm_riva_base + 0xA0)

/* PLL 13 (RIVA PLL) control registers in the clock controller. */
#define RIVA_PLL_MODE			(MSM_CLK_CTL_BASE + 0x31A0)
#define PLL_MODE_OUTCTRL		BIT(0)
#define PLL_MODE_BYPASSNL		BIT(1)
#define PLL_MODE_RESET_N		BIT(2)
#define PLL_MODE_REF_XO_SEL		0x30
#define PLL_MODE_REF_XO_SEL_CXO		(2 << 4)
#define PLL_MODE_REF_XO_SEL_RF		(3 << 4)
#define RIVA_PLL_L_VAL			(MSM_CLK_CTL_BASE + 0x31A4)
#define RIVA_PLL_M_VAL			(MSM_CLK_CTL_BASE + 0x31A8)
#define RIVA_PLL_N_VAL			(MSM_CLK_CTL_BASE + 0x31Ac)
#define RIVA_PLL_CONFIG			(MSM_CLK_CTL_BASE + 0x31B4)
#define RIVA_PLL_STATUS			(MSM_CLK_CTL_BASE + 0x31B8)

#define RIVA_PMU_ROOT_CLK_SEL		(msm_riva_base + 0xC8)
#define RIVA_PMU_ROOT_CLK_SEL_3		BIT(2)

#define RIVA_PMU_CLK_ROOT3		(msm_riva_base + 0x78)
#define RIVA_PMU_CLK_ROOT3_ENA		BIT(0)
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV	0x3C
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV_2	(1 << 2)
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL	0x1C0
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA	(1 << 6)
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV	0x1E00
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV_2	(1 << 9)
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL	0xE000
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA	(1 << 13)

/* DSPS (PPSS) clock and reset registers. */
#define PPSS_RESET			(MSM_CLK_CTL_BASE + 0x2594)
#define PPSS_PROC_CLK_CTL		(MSM_CLK_CTL_BASE + 0x2588)
#define PPSS_HCLK_CTL			(MSM_CLK_CTL_BASE + 0x2580)

/* Peripheral ids understood by the secure PIL (PAS) service. */
#define PAS_Q6		1
#define PAS_DSPS	2
#define PAS_MODEM_SW	4
#define PAS_MODEM_FW	5

/* PAS SCM command ids (PAS_MEM_CMD is unused in this file). */
#define PAS_INIT_IMAGE_CMD	1
#define PAS_MEM_CMD		2
#define PAS_AUTH_AND_RESET_CMD	5
#define PAS_SHUTDOWN_CMD	6
131
/*
 * Request/response layouts for the secure-world PAS SCM calls.
 * The field layout is part of the secure-world ABI; do not reorder.
 */
struct pas_init_image_req {
	u32	proc;		/* PAS_* peripheral id */
	u32	image_addr;	/* physical address of the ELF metadata blob */
};

struct pas_init_image_resp {
	u32	image_valid;	/* nonzero if the image failed validation */
};

struct pas_auth_image_req {
	u32	proc;		/* PAS_* peripheral id */
};

struct pas_auth_image_resp {
	u32	reset_initiated; /* secure world's auth-and-reset result */
};

struct pas_shutdown_req {
	u32	proc;		/* PAS_* peripheral id */
};

struct pas_shutdown_resp {
	u32	success;	/* secure world's shutdown result */
};
156
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700157struct q6_data {
158 const unsigned strap_tcm_base;
159 const unsigned strap_ahb_upper;
160 const unsigned strap_ahb_lower;
Matt Wagantall0b2f9822011-07-12 20:11:28 -0700161 const unsigned reg_base_phys;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700162 void __iomem *reg_base;
163 void __iomem *aclk_reg;
164 void __iomem *jtag_clk_reg;
165 int start_addr;
166 struct regulator *vreg;
167 bool vreg_enabled;
168 const char *name;
169};
170
/*
 * Static configuration of the three Q6 cores.  The strap values are
 * SoC-specific magic numbers programming each core's TCM base and AHB
 * address window.  Only the modem cores have a JTAG clock register;
 * q6_lpass leaves jtag_clk_reg NULL.
 */
static struct q6_data q6_lpass = {
	.strap_tcm_base = (0x146 << 16),
	.strap_ahb_upper = (0x029 << 16),
	.strap_ahb_lower = (0x028 << 4),
	.reg_base_phys = MSM_LPASS_QDSP6SS_PHYS,
	.aclk_reg = SFAB_LPASS_Q6_ACLK_CTL,
	.name = "q6_lpass",
};

static struct q6_data q6_modem_fw = {
	.strap_tcm_base = (0x40 << 16),
	.strap_ahb_upper = (0x09 << 16),
	.strap_ahb_lower = (0x08 << 4),
	.reg_base_phys = MSM_FW_QDSP6SS_PHYS,
	.aclk_reg = SFAB_MSS_Q6_FW_ACLK_CTL,
	.jtag_clk_reg = MSS_Q6FW_JTAG_CLK_CTL,
	.name = "q6_modem_fw",
};

static struct q6_data q6_modem_sw = {
	.strap_tcm_base = (0x42 << 16),
	.strap_ahb_upper = (0x09 << 16),
	.strap_ahb_lower = (0x08 << 4),
	.reg_base_phys = MSM_SW_QDSP6SS_PHYS,
	.aclk_reg = SFAB_MSS_Q6_SW_ACLK_CTL,
	.jtag_clk_reg = MSS_Q6SW_JTAG_CLK_CTL,
	.name = "q6_modem_sw",
};
199
static void __iomem *mss_enable_reg;	/* MSM_MSS_ENABLE_PHYS mapping */
static void __iomem *msm_riva_base;	/* MSM_RIVA_PHYS mapping */
static unsigned long riva_start;	/* RIVA ELF entry, set by init_image */
203
Stephen Boyd9802ca92011-05-25 15:09:59 -0700204static int init_image_trusted(int id, const u8 *metadata, size_t size)
205{
206 int ret;
207 struct pas_init_image_req request;
208 struct pas_init_image_resp resp = {0};
209 void *mdata_buf;
210
211 /* Make memory physically contiguous */
212 mdata_buf = kmemdup(metadata, size, GFP_KERNEL);
213 if (!mdata_buf)
214 return -ENOMEM;
215
216 request.proc = id;
217 request.image_addr = virt_to_phys(mdata_buf);
218
219 ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &request,
220 sizeof(request), &resp, sizeof(resp));
221 kfree(mdata_buf);
222
223 if (ret)
224 return ret;
225 return resp.image_valid;
226}
227
/* Per-peripheral init_image ops for secure PIL: bind each PAS id. */
static int init_image_lpass_q6_trusted(const u8 *metadata, size_t size)
{
	return init_image_trusted(PAS_Q6, metadata, size);
}

static int init_image_modem_fw_q6_trusted(const u8 *metadata, size_t size)
{
	return init_image_trusted(PAS_MODEM_FW, metadata, size);
}

static int init_image_modem_sw_q6_trusted(const u8 *metadata, size_t size)
{
	return init_image_trusted(PAS_MODEM_SW, metadata, size);
}
242
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700243static int init_image_lpass_q6_untrusted(const u8 *metadata, size_t size)
244{
245 const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
246 q6_lpass.start_addr = ehdr->e_entry;
247 return 0;
248}
249
250static int init_image_modem_fw_q6_untrusted(const u8 *metadata, size_t size)
251{
252 const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
253 q6_modem_fw.start_addr = ehdr->e_entry;
254 return 0;
255}
256
257static int init_image_modem_sw_q6_untrusted(const u8 *metadata, size_t size)
258{
259 const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
260 q6_modem_sw.start_addr = ehdr->e_entry;
261 return 0;
262}
263
/*
 * Segment verification hook for the PIL framework.  Nothing to verify in
 * the non-secure path (the secure path validates inside the TZ), so this
 * always reports success.
 */
static int verify_blob(u32 phy_addr, size_t size)
{
	return 0;
}
268
Stephen Boyd9802ca92011-05-25 15:09:59 -0700269static int auth_and_reset_trusted(int id)
270{
271 int ret;
272 struct pas_auth_image_req request;
273 struct pas_auth_image_resp resp = {0};
274
275 request.proc = id;
276 ret = scm_call(SCM_SVC_PIL, PAS_AUTH_AND_RESET_CMD, &request,
277 sizeof(request), &resp, sizeof(resp));
278 if (ret)
279 return ret;
280
281 return resp.reset_initiated;
282}
283
Stephen Boydb6b54852011-08-16 14:16:27 -0700284static int power_up_q6(struct q6_data *q6)
Stephen Boyd9802ca92011-05-25 15:09:59 -0700285{
286 int err;
287
288 err = regulator_set_voltage(q6->vreg, 1050000, 1050000);
289 if (err) {
290 pr_err("Failed to set %s regulator's voltage.\n", q6->name);
291 return err;
292 }
Stephen Boydb6b54852011-08-16 14:16:27 -0700293 err = regulator_set_optimum_mode(q6->vreg, 100000);
294 if (err < 0) {
295 pr_err("Failed to set %s regulator's mode.\n", q6->name);
296 return err;
297 }
Stephen Boyd9802ca92011-05-25 15:09:59 -0700298 err = regulator_enable(q6->vreg);
299 if (err) {
300 pr_err("Failed to enable %s's regulator.\n", q6->name);
301 return err;
302 }
303 q6->vreg_enabled = true;
Stephen Boydb6b54852011-08-16 14:16:27 -0700304 return 0;
Stephen Boyd9802ca92011-05-25 15:09:59 -0700305}
306
Stephen Boydb6b54852011-08-16 14:16:27 -0700307static int reset_q6_trusted(int id, struct q6_data *q6)
308{
309 int err = power_up_q6(q6);
310 if (err)
311 return err;
312 return auth_and_reset_trusted(id);
313}
Stephen Boyd9802ca92011-05-25 15:09:59 -0700314
315static int reset_lpass_q6_trusted(void)
316{
317 return reset_q6_trusted(PAS_Q6, &q6_lpass);
318}
319
320static int reset_modem_fw_q6_trusted(void)
321{
322 return reset_q6_trusted(PAS_MODEM_FW, &q6_modem_fw);
323}
324
325static int reset_modem_sw_q6_trusted(void)
326{
327 return reset_q6_trusted(PAS_MODEM_SW, &q6_modem_sw);
328}
329
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700330static int reset_q6_untrusted(struct q6_data *q6)
331{
332 u32 reg, err = 0;
333
Stephen Boydb6b54852011-08-16 14:16:27 -0700334 err = power_up_q6(q6);
335 if (err)
336 return err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700337 /* Enable Q6 ACLK */
338 writel_relaxed(0x10, q6->aclk_reg);
339
340 if (q6 == &q6_modem_fw || q6 == &q6_modem_sw) {
341 /* Enable MSS clocks */
342 writel_relaxed(0x10, SFAB_MSS_M_ACLK_CTL);
343 writel_relaxed(0x10, SFAB_MSS_S_HCLK_CTL);
344 writel_relaxed(0x10, MSS_S_HCLK_CTL);
345 writel_relaxed(0x10, MSS_SLP_CLK_CTL);
346 /* Wait for clocks to enable */
347 mb();
348 udelay(10);
349
350 /* Enable JTAG clocks */
351 /* TODO: Remove if/when Q6 software enables them? */
352 writel_relaxed(0x10, q6->jtag_clk_reg);
353
354 /* De-assert MSS reset */
355 writel_relaxed(0x0, MSS_RESET);
356 mb();
357 udelay(10);
358
359 /* Enable MSS */
360 writel_relaxed(0x7, mss_enable_reg);
361 }
362
363 /*
364 * Assert AXIS_ACLK_EN override to allow for correct updating of the
365 * QDSP6_CORE_STATE status bit. This is mandatory only for the SW Q6
366 * in 8960v1 and optional elsewhere.
367 */
368 reg = readl_relaxed(q6->reg_base + QDSP6SS_CGC_OVERRIDE);
369 reg |= Q6SS_AXIS_ACLK_EN;
370 writel_relaxed(reg, q6->reg_base + QDSP6SS_CGC_OVERRIDE);
371
372 /* Deassert Q6SS_SS_ARES */
373 reg = readl_relaxed(q6->reg_base + QDSP6SS_RESET);
374 reg &= ~(Q6SS_SS_ARES);
375 writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);
376
377 /* Program boot address */
378 writel_relaxed((q6->start_addr >> 8) & 0xFFFFFF,
379 q6->reg_base + QDSP6SS_RST_EVB);
380
381 /* Program TCM and AHB address ranges */
382 writel_relaxed(q6->strap_tcm_base, q6->reg_base + QDSP6SS_STRAP_TCM);
383 writel_relaxed(q6->strap_ahb_upper | q6->strap_ahb_lower,
384 q6->reg_base + QDSP6SS_STRAP_AHB);
385
386 /* Turn off Q6 core clock */
387 writel_relaxed(Q6SS_SRC_SWITCH_CLK_OVR,
388 q6->reg_base + QDSP6SS_GFMUX_CTL);
389
390 /* Put memories to sleep */
391 writel_relaxed(Q6SS_CLAMP_IO, q6->reg_base + QDSP6SS_PWR_CTL);
392
393 /* Assert resets */
394 reg = readl_relaxed(q6->reg_base + QDSP6SS_RESET);
395 reg |= (Q6SS_CORE_ARES | Q6SS_ISDB_ARES | Q6SS_ETM_ARES
396 | Q6SS_STOP_CORE_ARES);
397 writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);
398
399 /* Wait 8 AHB cycles for Q6 to be fully reset (AHB = 1.5Mhz) */
400 mb();
401 usleep_range(20, 30);
402
403 /* Turn on Q6 memories */
404 reg = Q6SS_L2DATA_SLP_NRET_N | Q6SS_SLP_RET_N | Q6SS_L1TCM_SLP_NRET_N
405 | Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLEEP_NRET_N | Q6SS_ARR_STBY_N
406 | Q6SS_CLAMP_IO;
407 writel_relaxed(reg, q6->reg_base + QDSP6SS_PWR_CTL);
408
409 /* Turn on Q6 core clock */
410 reg = Q6SS_CLK_ENA | Q6SS_SRC_SWITCH_CLK_OVR;
411 writel_relaxed(reg, q6->reg_base + QDSP6SS_GFMUX_CTL);
412
413 /* Remove Q6SS_CLAMP_IO */
414 reg = readl_relaxed(q6->reg_base + QDSP6SS_PWR_CTL);
415 reg &= ~Q6SS_CLAMP_IO;
416 writel_relaxed(reg, q6->reg_base + QDSP6SS_PWR_CTL);
417
418 /* Bring Q6 core out of reset and start execution. */
419 writel_relaxed(0x0, q6->reg_base + QDSP6SS_RESET);
420
421 /*
422 * Re-enable auto-gating of AXIS_ACLK at lease one AXI clock cycle
423 * after resets are de-asserted.
424 */
425 mb();
426 usleep_range(1, 10);
427 reg = readl_relaxed(q6->reg_base + QDSP6SS_CGC_OVERRIDE);
428 reg &= ~Q6SS_AXIS_ACLK_EN;
429 writel_relaxed(reg, q6->reg_base + QDSP6SS_CGC_OVERRIDE);
430
Stephen Boydb6b54852011-08-16 14:16:27 -0700431 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700432}
433
/* Non-secure auth_and_reset ops: one thin wrapper per core. */
static int reset_lpass_q6_untrusted(void)
{
	return reset_q6_untrusted(&q6_lpass);
}

static int reset_modem_fw_q6_untrusted(void)
{
	return reset_q6_untrusted(&q6_modem_fw);
}

static int reset_modem_sw_q6_untrusted(void)
{
	return reset_q6_untrusted(&q6_modem_sw);
}
448
Stephen Boyd9802ca92011-05-25 15:09:59 -0700449static int shutdown_trusted(int id)
450{
451 int ret;
452 struct pas_shutdown_req request;
453 struct pas_shutdown_resp resp = {0};
454
455 request.proc = id;
456 ret = scm_call(SCM_SVC_PIL, PAS_SHUTDOWN_CMD, &request, sizeof(request),
457 &resp, sizeof(resp));
458 if (ret)
459 return ret;
460
461 return resp.success;
462}
463
464static int shutdown_q6_trusted(int id, struct q6_data *q6)
465{
466 int ret;
467
468 ret = shutdown_trusted(id);
Matt Wagantalldafcd3d2011-08-02 20:27:59 -0700469 if (ret)
470 return ret;
471
Stephen Boyd9802ca92011-05-25 15:09:59 -0700472 if (q6->vreg_enabled) {
473 regulator_disable(q6->vreg);
474 q6->vreg_enabled = false;
475 }
476
477 return ret;
478}
479
/* Secure shutdown ops: one thin wrapper per core, binding its PAS id. */
static int shutdown_lpass_q6_trusted(void)
{
	return shutdown_q6_trusted(PAS_Q6, &q6_lpass);
}

static int shutdown_modem_fw_q6_trusted(void)
{
	return shutdown_q6_trusted(PAS_MODEM_FW, &q6_modem_fw);
}

static int shutdown_modem_sw_q6_trusted(void)
{
	return shutdown_q6_trusted(PAS_MODEM_SW, &q6_modem_sw);
}
494
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700495static int shutdown_q6_untrusted(struct q6_data *q6)
496{
497 u32 reg;
498
499 /* Turn off Q6 core clock */
500 writel_relaxed(Q6SS_SRC_SWITCH_CLK_OVR,
501 q6->reg_base + QDSP6SS_GFMUX_CTL);
502
503 /* Assert resets */
504 reg = (Q6SS_SS_ARES | Q6SS_CORE_ARES | Q6SS_ISDB_ARES
505 | Q6SS_ETM_ARES | Q6SS_STOP_CORE_ARES | Q6SS_PRIV_ARES);
506 writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);
507
508 /* Turn off Q6 memories */
509 writel_relaxed(Q6SS_CLAMP_IO, q6->reg_base + QDSP6SS_PWR_CTL);
510
511 /* Put Modem Subsystem back into reset when shutting down FWQ6 */
512 if (q6 == &q6_modem_fw)
513 writel_relaxed(0x1, MSS_RESET);
514
515 if (q6->vreg_enabled) {
516 regulator_disable(q6->vreg);
517 q6->vreg_enabled = false;
518 }
519
520 return 0;
521}
522
/* Non-secure shutdown ops: one thin wrapper per core. */
static int shutdown_lpass_q6_untrusted(void)
{
	return shutdown_q6_untrusted(&q6_lpass);
}

static int shutdown_modem_fw_q6_untrusted(void)
{
	return shutdown_q6_untrusted(&q6_modem_fw);
}

static int shutdown_modem_sw_q6_untrusted(void)
{
	return shutdown_q6_untrusted(&q6_modem_sw);
}
537
538static int init_image_riva_untrusted(const u8 *metadata, size_t size)
539{
540 const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
541 riva_start = ehdr->e_entry;
542 return 0;
543}
544
/*
 * Non-secure RIVA (WCNSS) cold boot: enable the AXI bridge, program and
 * lock PLL 13, switch the cCPU root clock, remap the boot vector to the
 * firmware entry point, and release the cCPU from reset.  The exact
 * register ordering follows the hardware bring-up sequence; do not
 * reorder the writes.
 */
static int reset_riva_untrusted(void)
{
	u32 reg;
	bool xo;

	/* Enable A2XB bridge */
	reg = readl(RIVA_PMU_A2XB_CFG);
	reg |= RIVA_PMU_A2XB_CFG_EN;
	writel(reg, RIVA_PMU_A2XB_CFG);

	/* Determine which XO to use (48 MHz IRIS XO vs CXO reference). */
	reg = readl(RIVA_PMU_CFG);
	xo = (reg & RIVA_PMU_CFG_IRIS_XO_MODE) == RIVA_PMU_CFG_IRIS_XO_MODE_48;

	/* Program PLL 13 to 960 MHz */
	reg = readl(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_BYPASSNL | PLL_MODE_OUTCTRL | PLL_MODE_RESET_N);
	writel(reg, RIVA_PLL_MODE);

	/* L divider: 40 * 24 MHz or 50 * 19.2 MHz reference -> 960 MHz. */
	if (xo)
		writel(0x40000C00 | 40, RIVA_PLL_L_VAL);
	else
		writel(0x40000C00 | 50, RIVA_PLL_L_VAL);
	writel(0, RIVA_PLL_M_VAL);
	writel(1, RIVA_PLL_N_VAL);
	writel_relaxed(0x01495227, RIVA_PLL_CONFIG);

	reg = readl(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_REF_XO_SEL);
	reg |= xo ? PLL_MODE_REF_XO_SEL_RF : PLL_MODE_REF_XO_SEL_CXO;
	writel(reg, RIVA_PLL_MODE);

	/* Enable PLL 13 */
	reg |= PLL_MODE_BYPASSNL;
	writel(reg, RIVA_PLL_MODE);

	usleep_range(10, 20);

	reg |= PLL_MODE_RESET_N;
	writel(reg, RIVA_PLL_MODE);
	reg |= PLL_MODE_OUTCTRL;
	writel(reg, RIVA_PLL_MODE);

	/* Wait for PLL to settle */
	usleep_range(50, 100);

	/* Configure cCPU for 240 MHz on the currently-unused ROOT3 source,
	 * then toggle ROOT_CLK_SEL_3 to switch over glitch-free. */
	reg = readl(RIVA_PMU_CLK_ROOT3);
	if (readl(RIVA_PMU_ROOT_CLK_SEL) & RIVA_PMU_ROOT_CLK_SEL_3) {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC0_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC0_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC0_DIV_2;
	} else {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC1_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC1_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC1_DIV_2;
	}
	writel(reg, RIVA_PMU_CLK_ROOT3);
	reg |= RIVA_PMU_CLK_ROOT3_ENA;
	writel(reg, RIVA_PMU_CLK_ROOT3);
	reg = readl(RIVA_PMU_ROOT_CLK_SEL);
	reg ^= RIVA_PMU_ROOT_CLK_SEL_3;
	writel(reg, RIVA_PMU_ROOT_CLK_SEL);

	/* Use the high vector table */
	reg = readl(RIVA_PMU_CCPU_CTL);
	reg |= RIVA_PMU_CCPU_CTL_HIGH_IVT | RIVA_PMU_CCPU_CTL_REMAP_EN;
	writel(reg, RIVA_PMU_CCPU_CTL);

	/* Set base memory address */
	writel_relaxed(riva_start >> 16, RIVA_PMU_CCPU_BOOT_REMAP_ADDR);

	/* Clear warmboot bit indicating this is a cold boot */
	reg = readl(RIVA_PMU_CFG);
	reg &= ~(RIVA_PMU_CFG_WARM_BOOT);
	writel(reg, RIVA_PMU_CFG);

	/* Enable the cCPU clock */
	reg = readl(RIVA_PMU_OVRD_VAL);
	reg |= RIVA_PMU_OVRD_VAL_CCPU_CLK;
	writel(reg, RIVA_PMU_OVRD_VAL);

	/* Take cCPU out of reset */
	reg |= RIVA_PMU_OVRD_VAL_CCPU_RESET;
	writel(reg, RIVA_PMU_OVRD_VAL);

	return 0;
}
635
/* Stop the RIVA cCPU: gate its clock and put it back into reset. */
static int shutdown_riva_untrusted(void)
{
	u32 reg;
	/* Put riva into reset */
	reg = readl(RIVA_PMU_OVRD_VAL);
	reg &= ~(RIVA_PMU_OVRD_VAL_CCPU_RESET | RIVA_PMU_OVRD_VAL_CCPU_CLK);
	writel(reg, RIVA_PMU_OVRD_VAL);
	return 0;
}
645
/*
 * Non-secure DSPS (PPSS) ops.  No entry point is recorded here: the DSPS
 * boots from its fixed reset vector, so init_image only needs to ready
 * the memory/bus interface for segment loading.
 */
static int init_image_dsps_untrusted(const u8 *metadata, size_t size)
{
	/* Bring memory and bus interface out of reset */
	writel_relaxed(0x2, PPSS_RESET);
	writel_relaxed(0x10, PPSS_HCLK_CTL);
	return 0;
}

static int reset_dsps_untrusted(void)
{
	writel_relaxed(0x10, PPSS_PROC_CLK_CTL);
	/* Bring DSPS out of reset */
	writel_relaxed(0x0, PPSS_RESET);
	return 0;
}

static int shutdown_dsps_untrusted(void)
{
	/* Re-assert reset and gate the processor clock. */
	writel_relaxed(0x2, PPSS_RESET);
	writel_relaxed(0x0, PPSS_PROC_CLK_CTL);
	return 0;
}
668
Stephen Boyd9802ca92011-05-25 15:09:59 -0700669static int init_image_dsps_trusted(const u8 *metadata, size_t size)
670{
671 return init_image_trusted(PAS_DSPS, metadata, size);
672}
673
674static int reset_dsps_trusted(void)
675{
676 return auth_and_reset_trusted(PAS_DSPS);
677}
678
679static int shutdown_dsps_trusted(void)
680{
681 return shutdown_trusted(PAS_DSPS);
682}
683
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700684static struct pil_reset_ops pil_modem_fw_q6_ops = {
685 .init_image = init_image_modem_fw_q6_untrusted,
686 .verify_blob = verify_blob,
687 .auth_and_reset = reset_modem_fw_q6_untrusted,
688 .shutdown = shutdown_modem_fw_q6_untrusted,
689};
690
691static struct pil_reset_ops pil_modem_sw_q6_ops = {
692 .init_image = init_image_modem_sw_q6_untrusted,
693 .verify_blob = verify_blob,
694 .auth_and_reset = reset_modem_sw_q6_untrusted,
695 .shutdown = shutdown_modem_sw_q6_untrusted,
696};
697
698static struct pil_reset_ops pil_lpass_q6_ops = {
699 .init_image = init_image_lpass_q6_untrusted,
700 .verify_blob = verify_blob,
701 .auth_and_reset = reset_lpass_q6_untrusted,
702 .shutdown = shutdown_lpass_q6_untrusted,
703};
704
705static struct pil_reset_ops pil_riva_ops = {
706 .init_image = init_image_riva_untrusted,
707 .verify_blob = verify_blob,
708 .auth_and_reset = reset_riva_untrusted,
709 .shutdown = shutdown_riva_untrusted,
710};
711
712struct pil_reset_ops pil_dsps_ops = {
713 .init_image = init_image_dsps_untrusted,
714 .verify_blob = verify_blob,
715 .auth_and_reset = reset_dsps_untrusted,
716 .shutdown = shutdown_dsps_untrusted,
717};
718
Matt Wagantall0b2f9822011-07-12 20:11:28 -0700719static struct pil_device pil_lpass_q6 = {
720 .name = "q6",
721 .pdev = {
722 .name = "pil_lpass_q6",
723 .id = -1,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700724 },
Matt Wagantall0b2f9822011-07-12 20:11:28 -0700725 .ops = &pil_lpass_q6_ops,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700726};
727
Matt Wagantall0b2f9822011-07-12 20:11:28 -0700728static struct pil_device pil_modem_fw_q6 = {
729 .name = "modem_fw",
730 .depends_on = "q6",
731 .pdev = {
732 .name = "pil_modem_fw_q6",
733 .id = -1,
734 },
735 .ops = &pil_modem_fw_q6_ops,
736};
737
738static struct pil_device pil_modem_sw_q6 = {
739 .name = "modem",
740 .depends_on = "modem_fw",
741 .pdev = {
742 .name = "pil_modem_sw_q6",
743 .id = -1,
744 },
745 .ops = &pil_modem_sw_q6_ops,
746};
747
748static struct pil_device pil_riva = {
749 .name = "wcnss",
750 .pdev = {
751 .name = "pil_riva",
752 .id = -1,
753 },
754 .ops = &pil_riva_ops,
755};
756
757static struct pil_device pil_dsps = {
758 .name = "dsps",
759 .pdev = {
760 .name = "pil_dsps",
761 .id = -1,
762 },
763 .ops = &pil_dsps_ops,
764};
765
766static int __init q6_reset_init(struct q6_data *q6)
767{
768 int err;
769
770 q6->reg_base = ioremap(q6->reg_base_phys, SZ_256);
771 if (!q6->reg_base) {
772 err = -ENOMEM;
773 goto err_map;
774 }
775
776 q6->vreg = regulator_get(NULL, q6->name);
777 if (IS_ERR(q6->vreg)) {
778 err = PTR_ERR(q6->vreg);
779 goto err_vreg;
780 }
781
782 return 0;
783
784err_vreg:
785 iounmap(q6->reg_base);
786err_map:
787 return err;
788}
789
Stephen Boyd9802ca92011-05-25 15:09:59 -0700790#ifdef CONFIG_MSM_SECURE_PIL
791static bool secure_pil = true;
792#else
793static bool secure_pil;
794#endif
795
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700796static int __init msm_peripheral_reset_init(void)
797{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700798 int err;
799
800 /*
801 * Don't initialize PIL on simulated targets, as some
802 * subsystems may not be emulated on them.
803 */
804 if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3())
805 return 0;
806
Stephen Boyd9802ca92011-05-25 15:09:59 -0700807 if (secure_pil) {
808 pil_lpass_q6_ops.init_image = init_image_lpass_q6_trusted;
809 pil_lpass_q6_ops.auth_and_reset = reset_lpass_q6_trusted;
810 pil_lpass_q6_ops.shutdown = shutdown_lpass_q6_trusted;
811
812 pil_modem_fw_q6_ops.init_image = init_image_modem_fw_q6_trusted;
813 pil_modem_fw_q6_ops.auth_and_reset = reset_modem_fw_q6_trusted;
814 pil_modem_fw_q6_ops.shutdown = shutdown_modem_fw_q6_trusted;
815
816 pil_modem_sw_q6_ops.init_image = init_image_modem_sw_q6_trusted;
817 pil_modem_sw_q6_ops.auth_and_reset = reset_modem_sw_q6_trusted;
818 pil_modem_sw_q6_ops.shutdown = shutdown_modem_sw_q6_trusted;
819
820 pil_dsps_ops.init_image = init_image_dsps_trusted;
821 pil_dsps_ops.auth_and_reset = reset_dsps_trusted;
822 pil_dsps_ops.shutdown = shutdown_dsps_trusted;
823 }
824
Matt Wagantall0b2f9822011-07-12 20:11:28 -0700825 err = q6_reset_init(&q6_lpass);
826 if (err)
827 return err;
828 msm_pil_add_device(&pil_lpass_q6);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700829
Matt Wagantall0b2f9822011-07-12 20:11:28 -0700830 mss_enable_reg = ioremap(MSM_MSS_ENABLE_PHYS, 4);
831 if (!mss_enable_reg)
832 return -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700833
Matt Wagantall0b2f9822011-07-12 20:11:28 -0700834 err = q6_reset_init(&q6_modem_fw);
835 if (err) {
836 iounmap(mss_enable_reg);
837 return err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700838 }
Matt Wagantall0b2f9822011-07-12 20:11:28 -0700839 msm_pil_add_device(&pil_modem_fw_q6);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700840
Matt Wagantall0b2f9822011-07-12 20:11:28 -0700841 err = q6_reset_init(&q6_modem_sw);
842 if (err)
843 return err;
844 msm_pil_add_device(&pil_modem_sw_q6);
845
846 msm_pil_add_device(&pil_dsps);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700847
848 msm_riva_base = ioremap(MSM_RIVA_PHYS, SZ_256);
Matt Wagantall0b2f9822011-07-12 20:11:28 -0700849 if (!msm_riva_base)
850 return -ENOMEM;
851 msm_pil_add_device(&pil_riva);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700852
853 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700854}
855arch_initcall(msm_peripheral_reset_init);
Stephen Boyd9802ca92011-05-25 15:09:59 -0700856module_param(secure_pil, bool, S_IRUGO);
857MODULE_PARM_DESC(secure_pil, "Use Secure PIL");