/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/elf.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>

#include <asm/mach-types.h>

#include <mach/msm_iomap.h>
#include <mach/scm.h>

#include "peripheral-loader.h"

#define MSM_FW_QDSP6SS_PHYS	0x08800000
#define MSM_SW_QDSP6SS_PHYS	0x08900000
#define MSM_LPASS_QDSP6SS_PHYS	0x28800000
#define MSM_MSS_ENABLE_PHYS	0x08B00000

#define QDSP6SS_RST_EVB		0x0
#define QDSP6SS_RESET		0x04
#define QDSP6SS_CGC_OVERRIDE	0x18
#define QDSP6SS_STRAP_TCM	0x1C
#define QDSP6SS_STRAP_AHB	0x20
#define QDSP6SS_GFMUX_CTL	0x30
#define QDSP6SS_PWR_CTL		0x38

#define MSS_S_HCLK_CTL		(MSM_CLK_CTL_BASE + 0x2C70)
#define MSS_SLP_CLK_CTL		(MSM_CLK_CTL_BASE + 0x2C60)
#define SFAB_MSS_M_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2340)
#define SFAB_MSS_S_HCLK_CTL	(MSM_CLK_CTL_BASE + 0x2C00)
#define SFAB_MSS_Q6_FW_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2044)
#define SFAB_MSS_Q6_SW_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x2040)
#define SFAB_LPASS_Q6_ACLK_CTL	(MSM_CLK_CTL_BASE + 0x23A0)
#define MSS_Q6FW_JTAG_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2C6C)
#define MSS_Q6SW_JTAG_CLK_CTL	(MSM_CLK_CTL_BASE + 0x2C68)
#define MSS_RESET		(MSM_CLK_CTL_BASE + 0x2C64)

#define Q6SS_SS_ARES		BIT(0)
#define Q6SS_CORE_ARES		BIT(1)
#define Q6SS_ISDB_ARES		BIT(2)
#define Q6SS_ETM_ARES		BIT(3)
#define Q6SS_STOP_CORE_ARES	BIT(4)
#define Q6SS_PRIV_ARES		BIT(5)

#define Q6SS_L2DATA_SLP_NRET_N	BIT(0)
#define Q6SS_SLP_RET_N		BIT(1)
#define Q6SS_L1TCM_SLP_NRET_N	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N	BIT(3)
#define Q6SS_ETB_SLEEP_NRET_N	BIT(4)
#define Q6SS_ARR_STBY_N		BIT(5)
#define Q6SS_CLAMP_IO		BIT(6)

#define Q6SS_CLK_ENA		BIT(1)
#define Q6SS_SRC_SWITCH_CLK_OVR	BIT(8)
#define Q6SS_AXIS_ACLK_EN	BIT(9)

#define MSM_RIVA_PHYS			0x03204000
#define RIVA_PMU_A2XB_CFG		(msm_riva_base + 0xB8)
#define RIVA_PMU_A2XB_CFG_EN		BIT(0)

#define RIVA_PMU_CFG			(msm_riva_base + 0x28)
#define RIVA_PMU_CFG_WARM_BOOT		BIT(0)
#define RIVA_PMU_CFG_IRIS_XO_MODE	0x6
#define RIVA_PMU_CFG_IRIS_XO_MODE_48	(3 << 1)

#define RIVA_PMU_OVRD_VAL		(msm_riva_base + 0x30)
#define RIVA_PMU_OVRD_VAL_CCPU_RESET	BIT(0)
#define RIVA_PMU_OVRD_VAL_CCPU_CLK	BIT(1)

#define RIVA_PMU_CCPU_CTL		(msm_riva_base + 0x9C)
#define RIVA_PMU_CCPU_CTL_HIGH_IVT	BIT(0)
#define RIVA_PMU_CCPU_CTL_REMAP_EN	BIT(2)

#define RIVA_PMU_CCPU_BOOT_REMAP_ADDR	(msm_riva_base + 0xA0)

#define RIVA_PLL_MODE			(MSM_CLK_CTL_BASE + 0x31A0)
#define PLL_MODE_OUTCTRL		BIT(0)
#define PLL_MODE_BYPASSNL		BIT(1)
#define PLL_MODE_RESET_N		BIT(2)
#define PLL_MODE_REF_XO_SEL		0x30
#define PLL_MODE_REF_XO_SEL_CXO		(2 << 4)
#define PLL_MODE_REF_XO_SEL_RF		(3 << 4)
#define RIVA_PLL_L_VAL			(MSM_CLK_CTL_BASE + 0x31A4)
#define RIVA_PLL_M_VAL			(MSM_CLK_CTL_BASE + 0x31A8)
#define RIVA_PLL_N_VAL			(MSM_CLK_CTL_BASE + 0x31AC)
#define RIVA_PLL_CONFIG			(MSM_CLK_CTL_BASE + 0x31B4)
#define RIVA_PLL_STATUS			(MSM_CLK_CTL_BASE + 0x31B8)

#define RIVA_PMU_ROOT_CLK_SEL		(msm_riva_base + 0xC8)
#define RIVA_PMU_ROOT_CLK_SEL_3		BIT(2)

#define RIVA_PMU_CLK_ROOT3			(msm_riva_base + 0x78)
#define RIVA_PMU_CLK_ROOT3_ENA			BIT(0)
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV		0x3C
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV_2		(1 << 2)
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL		0x1C0
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA	(1 << 6)
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV		0x1E00
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV_2		(1 << 9)
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL		0xE000
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA	(1 << 13)

#define PPSS_RESET			(MSM_CLK_CTL_BASE + 0x2594)
#define PPSS_PROC_CLK_CTL		(MSM_CLK_CTL_BASE + 0x2588)
#define PPSS_HCLK_CTL			(MSM_CLK_CTL_BASE + 0x2580)

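/*
 * Peripheral IDs and command IDs for the secure PIL (PAS) interface; these
 * values are passed to scm_call() with the SCM_SVC_PIL service below.
 */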
#define PAS_Q6		1
#define PAS_DSPS	2
#define PAS_MODEM_SW	4
#define PAS_MODEM_FW	5
#define PAS_RIVA	6

#define PAS_INIT_IMAGE_CMD	1
#define PAS_MEM_CMD		2
#define PAS_AUTH_AND_RESET_CMD	5
#define PAS_SHUTDOWN_CMD	6
#define PAS_IS_SUPPORTED_CMD	7

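/*
 * Request and response layouts for the PAS commands above, exchanged with
 * the secure environment via scm_call().
 */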
struct pas_init_image_req {
	u32	proc;
	u32	image_addr;
};

struct pas_init_image_resp {
	u32	image_valid;
};

struct pas_auth_image_req {
	u32	proc;
};

struct pas_auth_image_resp {
	u32	reset_initiated;
};

struct pas_shutdown_req {
	u32	proc;
};

struct pas_shutdown_resp {
	u32	success;
};

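/*
 * Per-Q6 core state: strap values, register mappings and the core supply
 * regulator used by the reset and shutdown sequences below.
 */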
struct q6_data {
	const unsigned strap_tcm_base;
	const unsigned strap_ahb_upper;
	const unsigned strap_ahb_lower;
	const unsigned reg_base_phys;
	void __iomem *reg_base;
	void __iomem *aclk_reg;
	void __iomem *jtag_clk_reg;
	int start_addr;
	struct regulator *vreg;
	bool vreg_enabled;
	const char *name;
};

static struct q6_data q6_lpass = {
	.strap_tcm_base  = (0x146 << 16),
	.strap_ahb_upper = (0x029 << 16),
	.strap_ahb_lower = (0x028 << 4),
	.reg_base_phys = MSM_LPASS_QDSP6SS_PHYS,
	.aclk_reg = SFAB_LPASS_Q6_ACLK_CTL,
	.name = "q6_lpass",
};

static struct q6_data q6_modem_fw = {
	.strap_tcm_base  = (0x40 << 16),
	.strap_ahb_upper = (0x09 << 16),
	.strap_ahb_lower = (0x08 << 4),
	.reg_base_phys = MSM_FW_QDSP6SS_PHYS,
	.aclk_reg = SFAB_MSS_Q6_FW_ACLK_CTL,
	.jtag_clk_reg = MSS_Q6FW_JTAG_CLK_CTL,
	.name = "q6_modem_fw",
};

static struct q6_data q6_modem_sw = {
	.strap_tcm_base  = (0x42 << 16),
	.strap_ahb_upper = (0x09 << 16),
	.strap_ahb_lower = (0x08 << 4),
	.reg_base_phys = MSM_SW_QDSP6SS_PHYS,
	.aclk_reg = SFAB_MSS_Q6_SW_ACLK_CTL,
	.jtag_clk_reg = MSS_Q6SW_JTAG_CLK_CTL,
	.name = "q6_modem_sw",
};

static void __iomem *mss_enable_reg;
static void __iomem *msm_riva_base;
static unsigned long riva_start;

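/*
 * Trusted boot path: copy the image metadata into a physically contiguous
 * buffer and pass it to the secure environment, which reports back whether
 * the image is valid.
 */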
static int init_image_trusted(int id, const u8 *metadata, size_t size)
{
	int ret;
	struct pas_init_image_req request;
	struct pas_init_image_resp resp = {0};
	void *mdata_buf;

	/* Make memory physically contiguous */
	mdata_buf = kmemdup(metadata, size, GFP_KERNEL);
	if (!mdata_buf)
		return -ENOMEM;

	request.proc = id;
	request.image_addr = virt_to_phys(mdata_buf);

	ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &request,
			sizeof(request), &resp, sizeof(resp));
	kfree(mdata_buf);

	if (ret)
		return ret;
	return resp.image_valid;
}

static int init_image_lpass_q6_trusted(struct pil_device *pil,
				       const u8 *metadata, size_t size)
{
	return init_image_trusted(PAS_Q6, metadata, size);
}

static int init_image_modem_fw_q6_trusted(struct pil_device *pil,
					  const u8 *metadata, size_t size)
{
	return init_image_trusted(PAS_MODEM_FW, metadata, size);
}

static int init_image_modem_sw_q6_trusted(struct pil_device *pil,
					  const u8 *metadata, size_t size)
{
	return init_image_trusted(PAS_MODEM_SW, metadata, size);
}

static int init_image_lpass_q6_untrusted(struct pil_device *pil,
					 const u8 *metadata, size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	q6_lpass.start_addr = ehdr->e_entry;
	return 0;
}

static int init_image_modem_fw_q6_untrusted(struct pil_device *pil,
					    const u8 *metadata, size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	q6_modem_fw.start_addr = ehdr->e_entry;
	return 0;
}

static int init_image_modem_sw_q6_untrusted(struct pil_device *pil,
					    const u8 *metadata, size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	q6_modem_sw.start_addr = ehdr->e_entry;
	return 0;
}

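/* Nothing to verify in the non-secure path; blobs are accepted as-is. */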
static int verify_blob(struct pil_device *pil, u32 phy_addr, size_t size)
{
	return 0;
}

static int auth_and_reset_trusted(int id)
{
	int ret;
	struct pas_auth_image_req request;
	struct pas_auth_image_resp resp = {0};

	request.proc = id;
	ret = scm_call(SCM_SVC_PIL, PAS_AUTH_AND_RESET_CMD, &request,
			sizeof(request), &resp, sizeof(resp));
	if (ret)
		return ret;

	return resp.reset_initiated;
}

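/* Bring up the Q6 core supply: 1.05 V, a 100 mA load request, then enable. */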
static int power_up_q6(struct q6_data *q6)
{
	int err;

	err = regulator_set_voltage(q6->vreg, 1050000, 1050000);
	if (err) {
		pr_err("Failed to set %s regulator's voltage.\n", q6->name);
		return err;
	}
	err = regulator_set_optimum_mode(q6->vreg, 100000);
	if (err < 0) {
		pr_err("Failed to set %s regulator's mode.\n", q6->name);
		return err;
	}
	err = regulator_enable(q6->vreg);
	if (err) {
		pr_err("Failed to enable %s's regulator.\n", q6->name);
		return err;
	}
	q6->vreg_enabled = true;
	return 0;
}

static int reset_q6_trusted(int id, struct q6_data *q6)
{
	int err = power_up_q6(q6);
	if (err)
		return err;
	return auth_and_reset_trusted(id);
}

static int reset_lpass_q6_trusted(struct pil_device *pil)
{
	return reset_q6_trusted(PAS_Q6, &q6_lpass);
}

static int reset_modem_fw_q6_trusted(struct pil_device *pil)
{
	return reset_q6_trusted(PAS_MODEM_FW, &q6_modem_fw);
}

static int reset_modem_sw_q6_trusted(struct pil_device *pil)
{
	return reset_q6_trusted(PAS_MODEM_SW, &q6_modem_sw);
}

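/*
 * Non-secure Q6 bring-up: enable the bus and (for the modem cores) MSS
 * clocks, program the boot address and strap registers, then sequence the
 * QDSP6SS power, clock and reset controls to start the core.
 */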
static int reset_q6_untrusted(struct q6_data *q6)
{
	u32 reg;
	int err;

	err = power_up_q6(q6);
	if (err)
		return err;
	/* Enable Q6 ACLK */
	writel_relaxed(0x10, q6->aclk_reg);

	if (q6 == &q6_modem_fw || q6 == &q6_modem_sw) {
		/* Enable MSS clocks */
		writel_relaxed(0x10, SFAB_MSS_M_ACLK_CTL);
		writel_relaxed(0x10, SFAB_MSS_S_HCLK_CTL);
		writel_relaxed(0x10, MSS_S_HCLK_CTL);
		writel_relaxed(0x10, MSS_SLP_CLK_CTL);
		/* Wait for clocks to enable */
		mb();
		udelay(10);

		/* Enable JTAG clocks */
		/* TODO: Remove if/when Q6 software enables them? */
		writel_relaxed(0x10, q6->jtag_clk_reg);

		/* De-assert MSS reset */
		writel_relaxed(0x0, MSS_RESET);
		mb();
		udelay(10);

		/* Enable MSS */
		writel_relaxed(0x7, mss_enable_reg);
	}

	/*
	 * Assert AXIS_ACLK_EN override to allow for correct updating of the
	 * QDSP6_CORE_STATE status bit. This is mandatory only for the SW Q6
	 * in 8960v1 and optional elsewhere.
	 */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_CGC_OVERRIDE);
	reg |= Q6SS_AXIS_ACLK_EN;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_CGC_OVERRIDE);

	/* Deassert Q6SS_SS_ARES */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_RESET);
	reg &= ~(Q6SS_SS_ARES);
	writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);

	/* Program boot address */
	writel_relaxed((q6->start_addr >> 8) & 0xFFFFFF,
			q6->reg_base + QDSP6SS_RST_EVB);

	/* Program TCM and AHB address ranges */
	writel_relaxed(q6->strap_tcm_base, q6->reg_base + QDSP6SS_STRAP_TCM);
	writel_relaxed(q6->strap_ahb_upper | q6->strap_ahb_lower,
			q6->reg_base + QDSP6SS_STRAP_AHB);

	/* Turn off Q6 core clock */
	writel_relaxed(Q6SS_SRC_SWITCH_CLK_OVR,
			q6->reg_base + QDSP6SS_GFMUX_CTL);

	/* Put memories to sleep */
	writel_relaxed(Q6SS_CLAMP_IO, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Assert resets */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_RESET);
	reg |= (Q6SS_CORE_ARES | Q6SS_ISDB_ARES | Q6SS_ETM_ARES
			| Q6SS_STOP_CORE_ARES);
	writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);

	/* Wait 8 AHB cycles for Q6 to be fully reset (AHB = 1.5 MHz) */
	mb();
	usleep_range(20, 30);

	/* Turn on Q6 memories */
	reg = Q6SS_L2DATA_SLP_NRET_N | Q6SS_SLP_RET_N | Q6SS_L1TCM_SLP_NRET_N
	    | Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLEEP_NRET_N | Q6SS_ARR_STBY_N
	    | Q6SS_CLAMP_IO;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Turn on Q6 core clock */
	reg = Q6SS_CLK_ENA | Q6SS_SRC_SWITCH_CLK_OVR;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_GFMUX_CTL);

	/* Remove Q6SS_CLAMP_IO */
	reg = readl_relaxed(q6->reg_base + QDSP6SS_PWR_CTL);
	reg &= ~Q6SS_CLAMP_IO;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Bring Q6 core out of reset and start execution. */
	writel_relaxed(0x0, q6->reg_base + QDSP6SS_RESET);

	/*
	 * Re-enable auto-gating of AXIS_ACLK at least one AXI clock cycle
	 * after resets are de-asserted.
	 */
	mb();
	usleep_range(1, 10);
	reg = readl_relaxed(q6->reg_base + QDSP6SS_CGC_OVERRIDE);
	reg &= ~Q6SS_AXIS_ACLK_EN;
	writel_relaxed(reg, q6->reg_base + QDSP6SS_CGC_OVERRIDE);

	return 0;
}

static int reset_lpass_q6_untrusted(struct pil_device *pil)
{
	return reset_q6_untrusted(&q6_lpass);
}

static int reset_modem_fw_q6_untrusted(struct pil_device *pil)
{
	return reset_q6_untrusted(&q6_modem_fw);
}

static int reset_modem_sw_q6_untrusted(struct pil_device *pil)
{
	return reset_q6_untrusted(&q6_modem_sw);
}

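/* Ask the secure environment to shut down the given peripheral. */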
static int shutdown_trusted(int id)
{
	int ret;
	struct pas_shutdown_req request;
	struct pas_shutdown_resp resp = {0};

	request.proc = id;
	ret = scm_call(SCM_SVC_PIL, PAS_SHUTDOWN_CMD, &request, sizeof(request),
			&resp, sizeof(resp));
	if (ret)
		return ret;

	return resp.success;
}

static int shutdown_q6_trusted(int id, struct q6_data *q6)
{
	int ret;

	ret = shutdown_trusted(id);
	if (ret)
		return ret;

	if (q6->vreg_enabled) {
		regulator_disable(q6->vreg);
		q6->vreg_enabled = false;
	}

	return ret;
}

static int shutdown_lpass_q6_trusted(struct pil_device *pil)
{
	return shutdown_q6_trusted(PAS_Q6, &q6_lpass);
}

static int shutdown_modem_fw_q6_trusted(struct pil_device *pil)
{
	return shutdown_q6_trusted(PAS_MODEM_FW, &q6_modem_fw);
}

static int shutdown_modem_sw_q6_trusted(struct pil_device *pil)
{
	return shutdown_q6_trusted(PAS_MODEM_SW, &q6_modem_sw);
}

static int shutdown_q6_untrusted(struct q6_data *q6)
{
	u32 reg;

	/* Turn off Q6 core clock */
	writel_relaxed(Q6SS_SRC_SWITCH_CLK_OVR,
			q6->reg_base + QDSP6SS_GFMUX_CTL);

	/* Assert resets */
	reg = (Q6SS_SS_ARES | Q6SS_CORE_ARES | Q6SS_ISDB_ARES
			| Q6SS_ETM_ARES | Q6SS_STOP_CORE_ARES | Q6SS_PRIV_ARES);
	writel_relaxed(reg, q6->reg_base + QDSP6SS_RESET);

	/* Turn off Q6 memories */
	writel_relaxed(Q6SS_CLAMP_IO, q6->reg_base + QDSP6SS_PWR_CTL);

	/* Put Modem Subsystem back into reset when shutting down FW Q6 */
	if (q6 == &q6_modem_fw)
		writel_relaxed(0x1, MSS_RESET);

	if (q6->vreg_enabled) {
		regulator_disable(q6->vreg);
		q6->vreg_enabled = false;
	}

	return 0;
}

static int shutdown_lpass_q6_untrusted(struct pil_device *pil)
{
	return shutdown_q6_untrusted(&q6_lpass);
}

static int shutdown_modem_fw_q6_untrusted(struct pil_device *pil)
{
	return shutdown_q6_untrusted(&q6_modem_fw);
}

static int shutdown_modem_sw_q6_untrusted(struct pil_device *pil)
{
	return shutdown_q6_untrusted(&q6_modem_sw);
}

static int init_image_riva_untrusted(struct pil_device *pil, const u8 *metadata,
				     size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	riva_start = ehdr->e_entry;
	return 0;
}

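/*
 * Non-secure Riva cold boot: enable the A2XB bridge, program PLL 13 based on
 * the IRIS XO mode, clock the cCPU at 240 MHz, remap the boot vector to the
 * ELF entry point and then release the cCPU from reset.
 */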
static int reset_riva_untrusted(struct pil_device *pil)
{
	u32 reg;
	bool xo;

	/* Enable A2XB bridge */
	reg = readl(RIVA_PMU_A2XB_CFG);
	reg |= RIVA_PMU_A2XB_CFG_EN;
	writel(reg, RIVA_PMU_A2XB_CFG);

	/* Determine which XO to use */
	reg = readl(RIVA_PMU_CFG);
	xo = (reg & RIVA_PMU_CFG_IRIS_XO_MODE) == RIVA_PMU_CFG_IRIS_XO_MODE_48;

	/* Program PLL 13 to 960 MHz */
	reg = readl(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_BYPASSNL | PLL_MODE_OUTCTRL | PLL_MODE_RESET_N);
	writel(reg, RIVA_PLL_MODE);

	if (xo)
		writel(0x40000C00 | 40, RIVA_PLL_L_VAL);
	else
		writel(0x40000C00 | 50, RIVA_PLL_L_VAL);
	writel(0, RIVA_PLL_M_VAL);
	writel(1, RIVA_PLL_N_VAL);
	writel_relaxed(0x01495227, RIVA_PLL_CONFIG);

	reg = readl(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_REF_XO_SEL);
	reg |= xo ? PLL_MODE_REF_XO_SEL_RF : PLL_MODE_REF_XO_SEL_CXO;
	writel(reg, RIVA_PLL_MODE);

	/* Enable PLL 13 */
	reg |= PLL_MODE_BYPASSNL;
	writel(reg, RIVA_PLL_MODE);

	usleep_range(10, 20);

	reg |= PLL_MODE_RESET_N;
	writel(reg, RIVA_PLL_MODE);
	reg |= PLL_MODE_OUTCTRL;
	writel(reg, RIVA_PLL_MODE);

	/* Wait for PLL to settle */
	usleep_range(50, 100);

	/* Configure cCPU for 240 MHz */
	reg = readl(RIVA_PMU_CLK_ROOT3);
	if (readl(RIVA_PMU_ROOT_CLK_SEL) & RIVA_PMU_ROOT_CLK_SEL_3) {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC0_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC0_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC0_DIV_2;
	} else {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC1_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC1_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC1_DIV_2;
	}
	writel(reg, RIVA_PMU_CLK_ROOT3);
	reg |= RIVA_PMU_CLK_ROOT3_ENA;
	writel(reg, RIVA_PMU_CLK_ROOT3);
	reg = readl(RIVA_PMU_ROOT_CLK_SEL);
	reg ^= RIVA_PMU_ROOT_CLK_SEL_3;
	writel(reg, RIVA_PMU_ROOT_CLK_SEL);

	/* Use the high vector table */
	reg = readl(RIVA_PMU_CCPU_CTL);
	reg |= RIVA_PMU_CCPU_CTL_HIGH_IVT | RIVA_PMU_CCPU_CTL_REMAP_EN;
	writel(reg, RIVA_PMU_CCPU_CTL);

	/* Set base memory address */
	writel_relaxed(riva_start >> 16, RIVA_PMU_CCPU_BOOT_REMAP_ADDR);

	/* Clear warmboot bit indicating this is a cold boot */
	reg = readl(RIVA_PMU_CFG);
	reg &= ~(RIVA_PMU_CFG_WARM_BOOT);
	writel(reg, RIVA_PMU_CFG);

	/* Enable the cCPU clock */
	reg = readl(RIVA_PMU_OVRD_VAL);
	reg |= RIVA_PMU_OVRD_VAL_CCPU_CLK;
	writel(reg, RIVA_PMU_OVRD_VAL);

	/* Take cCPU out of reset */
	reg |= RIVA_PMU_OVRD_VAL_CCPU_RESET;
	writel(reg, RIVA_PMU_OVRD_VAL);

	return 0;
}

static int shutdown_riva_untrusted(struct pil_device *pil)
{
	u32 reg;
	/* Put riva into reset */
	reg = readl(RIVA_PMU_OVRD_VAL);
	reg &= ~(RIVA_PMU_OVRD_VAL_CCPU_RESET | RIVA_PMU_OVRD_VAL_CCPU_CLK);
	writel(reg, RIVA_PMU_OVRD_VAL);
	return 0;
}

static int init_image_riva_trusted(struct pil_device *pil, const u8 *metadata,
				   size_t size)
{
	return init_image_trusted(PAS_RIVA, metadata, size);
}

static int reset_riva_trusted(struct pil_device *pil)
{
	return auth_and_reset_trusted(PAS_RIVA);
}

static int shutdown_riva_trusted(struct pil_device *pil)
{
	return shutdown_trusted(PAS_RIVA);
}

static int init_image_dsps_untrusted(struct pil_device *pil, const u8 *metadata,
				     size_t size)
{
	/* Bring memory and bus interface out of reset */
	writel_relaxed(0x2, PPSS_RESET);
	writel_relaxed(0x10, PPSS_HCLK_CTL);
	return 0;
}

static int reset_dsps_untrusted(struct pil_device *pil)
{
	writel_relaxed(0x10, PPSS_PROC_CLK_CTL);
	/* Bring DSPS out of reset */
	writel_relaxed(0x0, PPSS_RESET);
	return 0;
}

static int shutdown_dsps_untrusted(struct pil_device *pil)
{
	writel_relaxed(0x2, PPSS_RESET);
	writel_relaxed(0x0, PPSS_PROC_CLK_CTL);
	return 0;
}

static int init_image_dsps_trusted(struct pil_device *pil, const u8 *metadata,
				   size_t size)
{
	return init_image_trusted(PAS_DSPS, metadata, size);
}

static int reset_dsps_trusted(struct pil_device *pil)
{
	return auth_and_reset_trusted(PAS_DSPS);
}

static int shutdown_dsps_trusted(struct pil_device *pil)
{
	return shutdown_trusted(PAS_DSPS);
}

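/*
 * Reset ops default to the non-secure handlers; use_secure_pil() below swaps
 * in the trusted variants for each peripheral the secure environment supports.
 */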
static struct pil_reset_ops pil_modem_fw_q6_ops = {
	.init_image = init_image_modem_fw_q6_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_modem_fw_q6_untrusted,
	.shutdown = shutdown_modem_fw_q6_untrusted,
};

static struct pil_reset_ops pil_modem_sw_q6_ops = {
	.init_image = init_image_modem_sw_q6_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_modem_sw_q6_untrusted,
	.shutdown = shutdown_modem_sw_q6_untrusted,
};

static struct pil_reset_ops pil_lpass_q6_ops = {
	.init_image = init_image_lpass_q6_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_lpass_q6_untrusted,
	.shutdown = shutdown_lpass_q6_untrusted,
};

static struct pil_reset_ops pil_riva_ops = {
	.init_image = init_image_riva_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_riva_untrusted,
	.shutdown = shutdown_riva_untrusted,
};

struct pil_reset_ops pil_dsps_ops = {
	.init_image = init_image_dsps_untrusted,
	.verify_blob = verify_blob,
	.auth_and_reset = reset_dsps_untrusted,
	.shutdown = shutdown_dsps_untrusted,
};

static struct pil_device pil_lpass_q6 = {
	.name = "q6",
	.pdev = {
		.name = "pil_lpass_q6",
		.id = -1,
	},
	.ops = &pil_lpass_q6_ops,
};

static struct pil_device pil_modem_fw_q6 = {
	.name = "modem_fw",
	.depends_on = "q6",
	.pdev = {
		.name = "pil_modem_fw_q6",
		.id = -1,
	},
	.ops = &pil_modem_fw_q6_ops,
};

static struct pil_device pil_modem_sw_q6 = {
	.name = "modem",
	.depends_on = "modem_fw",
	.pdev = {
		.name = "pil_modem_sw_q6",
		.id = -1,
	},
	.ops = &pil_modem_sw_q6_ops,
};

static struct pil_device pil_riva = {
	.name = "wcnss",
	.pdev = {
		.name = "pil_riva",
		.id = -1,
	},
	.ops = &pil_riva_ops,
};

static struct pil_device pil_dsps = {
	.name = "dsps",
	.pdev = {
		.name = "pil_dsps",
		.id = -1,
	},
	.ops = &pil_dsps_ops,
};

static int __init q6_reset_init(struct q6_data *q6)
{
	int err;

	q6->reg_base = ioremap(q6->reg_base_phys, SZ_256);
	if (!q6->reg_base) {
		err = -ENOMEM;
		goto err_map;
	}

	q6->vreg = regulator_get(NULL, q6->name);
	if (IS_ERR(q6->vreg)) {
		err = PTR_ERR(q6->vreg);
		goto err_vreg;
	}

	return 0;

err_vreg:
	iounmap(q6->reg_base);
err_map:
	return err;
}

static int __init can_secure_boot(int id)
{
	int ret;
	u32 periph = id;
	u32 ret_val = 0;

	ret = scm_call(SCM_SVC_PIL, PAS_IS_SUPPORTED_CMD, &periph,
			sizeof(periph), &ret_val, sizeof(ret_val));
	if (ret)
		return ret;

	return ret_val;
}

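/* Exposed as a module parameter at the bottom of this file ("Use Secure PIL"). */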
static bool secure_pil = true;

static void __init use_secure_pil(void)
{
	if (scm_is_call_available(SCM_SVC_PIL, PAS_IS_SUPPORTED_CMD) <= 0)
		return;

	if (can_secure_boot(PAS_Q6) > 0) {
		pil_lpass_q6_ops.init_image = init_image_lpass_q6_trusted;
		pil_lpass_q6_ops.auth_and_reset = reset_lpass_q6_trusted;
		pil_lpass_q6_ops.shutdown = shutdown_lpass_q6_trusted;
	}

	if (can_secure_boot(PAS_MODEM_FW) > 0) {
		pil_modem_fw_q6_ops.init_image = init_image_modem_fw_q6_trusted;
		pil_modem_fw_q6_ops.auth_and_reset = reset_modem_fw_q6_trusted;
		pil_modem_fw_q6_ops.shutdown = shutdown_modem_fw_q6_trusted;
	}

	if (can_secure_boot(PAS_MODEM_SW) > 0) {
		pil_modem_sw_q6_ops.init_image = init_image_modem_sw_q6_trusted;
		pil_modem_sw_q6_ops.auth_and_reset = reset_modem_sw_q6_trusted;
		pil_modem_sw_q6_ops.shutdown = shutdown_modem_sw_q6_trusted;
	}

	if (can_secure_boot(PAS_DSPS) > 0) {
		pil_dsps_ops.init_image = init_image_dsps_trusted;
		pil_dsps_ops.auth_and_reset = reset_dsps_trusted;
		pil_dsps_ops.shutdown = shutdown_dsps_trusted;
	}

	if (can_secure_boot(PAS_RIVA) > 0) {
		pil_riva_ops.init_image = init_image_riva_trusted;
		pil_riva_ops.auth_and_reset = reset_riva_trusted;
		pil_riva_ops.shutdown = shutdown_riva_trusted;
	}
}

static int __init msm_peripheral_reset_init(void)
{
	int err;

	/*
	 * Don't initialize PIL on simulated targets, as some
	 * subsystems may not be emulated on them.
	 */
	if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3())
		return 0;

	if (secure_pil)
		use_secure_pil();

	err = q6_reset_init(&q6_lpass);
	if (err)
		return err;
	msm_pil_add_device(&pil_lpass_q6);

	mss_enable_reg = ioremap(MSM_MSS_ENABLE_PHYS, 4);
	if (!mss_enable_reg)
		return -ENOMEM;

	err = q6_reset_init(&q6_modem_fw);
	if (err) {
		iounmap(mss_enable_reg);
		return err;
	}
	msm_pil_add_device(&pil_modem_fw_q6);

	err = q6_reset_init(&q6_modem_sw);
	if (err)
		return err;
	msm_pil_add_device(&pil_modem_sw_q6);

	msm_pil_add_device(&pil_dsps);

	msm_riva_base = ioremap(MSM_RIVA_PHYS, SZ_256);
	if (!msm_riva_base)
		return -ENOMEM;
	msm_pil_add_device(&pil_riva);

	return 0;
}
arch_initcall(msm_peripheral_reset_init);
module_param(secure_pil, bool, S_IRUGO);
MODULE_PARM_DESC(secure_pil, "Use Secure PIL");