/*
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/elf.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>

#include <mach/clk.h>

#include "peripheral-loader.h"
#include "pil-q6v5.h"

/* Q6 Register Offsets */
#define QDSP6SS_RST_EVB                 0x010

/* AXI Halting Registers */
#define MSS_Q6_HALT_BASE                0x180
#define MSS_MODEM_HALT_BASE             0x200
#define MSS_NC_HALT_BASE                0x280

/* MSS_CLAMP_IO Register Value */
#define MSS_IO_UNCLAMP_ALL              0x40

/* RMB Status Register Values */
#define STATUS_PBL_SUCCESS              0x1
#define STATUS_XPU_UNLOCKED             0x1
#define STATUS_XPU_UNLOCKED_SCRIBBLED   0x2

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE                   0x00
#define RMB_PBL_STATUS                  0x04
#define RMB_MBA_STATUS                  0x0C

#define PROXY_TIMEOUT_MS                10000
#define POLL_INTERVAL_US                50

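/*
 * Maximum time to wait, in milliseconds, for each of the PBL and MBA
 * boot-status polls below; adjustable at runtime via the module parameter.
 */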
static int pbl_mba_boot_timeout_ms = 100;
module_param(pbl_mba_boot_timeout_ms, int, S_IRUGO | S_IWUSR);

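/* Enable the MSS power rail; failures are logged and returned to the caller. */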
static int pil_mss_power_up(struct device *dev)
{
        int ret;
        struct q6v5_data *drv = dev_get_drvdata(dev);

        ret = regulator_enable(drv->vreg);
        if (ret)
                dev_err(dev, "Failed to enable regulator.\n");

        return ret;
}

static int pil_mss_power_down(struct device *dev)
{
        struct q6v5_data *drv = dev_get_drvdata(dev);

        return regulator_disable(drv->vreg);
}

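/*
 * Enable the clocks needed to boot and run the modem subsystem: AHB,
 * core (after deasserting its reset), AXI, register-interface and boot
 * ROM clocks. On any failure, everything enabled so far is rolled back.
 */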
static int pil_mss_enable_clks(struct q6v5_data *drv)
{
        int ret;

        ret = clk_prepare_enable(drv->ahb_clk);
        if (ret)
                goto err_ahb_clk;
        ret = clk_reset(drv->core_clk, CLK_RESET_DEASSERT);
        if (ret)
                goto err_reset;
        ret = clk_prepare_enable(drv->core_clk);
        if (ret)
                goto err_core_clk;
        ret = clk_prepare_enable(drv->axi_clk);
        if (ret)
                goto err_axi_clk;
        ret = clk_prepare_enable(drv->reg_clk);
        if (ret)
                goto err_reg_clk;
        ret = clk_prepare_enable(drv->rom_clk);
        if (ret)
                goto err_rom_clk;

        return 0;

err_rom_clk:
        clk_disable_unprepare(drv->reg_clk);
err_reg_clk:
        clk_disable_unprepare(drv->axi_clk);
err_axi_clk:
        clk_disable_unprepare(drv->core_clk);
err_core_clk:
        clk_reset(drv->core_clk, CLK_RESET_ASSERT);
err_reset:
        clk_disable_unprepare(drv->ahb_clk);
err_ahb_clk:
        return ret;
}

static void pil_mss_disable_clks(struct q6v5_data *drv)
{
        clk_disable_unprepare(drv->rom_clk);
        clk_disable_unprepare(drv->reg_clk);
        clk_disable_unprepare(drv->axi_clk);
        clk_disable_unprepare(drv->core_clk);
        clk_reset(drv->core_clk, CLK_RESET_ASSERT);
        clk_disable_unprepare(drv->ahb_clk);
}

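/*
 * Poll the RMB status registers until first the PBL and then the MBA
 * report completion, or until pbl_mba_boot_timeout_ms expires for either
 * stage. Any status other than PBL success, or XPU unlocked (possibly
 * scribbled) for the MBA, is treated as a boot failure.
 */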
static int wait_for_mba_ready(struct device *dev)
{
        struct q6v5_data *drv = dev_get_drvdata(dev);
        int ret;
        u32 status;

        /* Wait for PBL completion. */
        ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
                status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
        if (ret) {
                dev_err(dev, "PBL boot timed out\n");
                return ret;
        }
        if (status != STATUS_PBL_SUCCESS) {
                dev_err(dev, "PBL returned unexpected status %u\n", status);
                return -EINVAL;
        }

        /* Wait for MBA completion. */
        ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
                status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
        if (ret) {
                dev_err(dev, "MBA boot timed out\n");
                return ret;
        }
        if (status != STATUS_XPU_UNLOCKED &&
            status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
                dev_err(dev, "MBA returned unexpected status %u\n", status);
                return -EINVAL;
        }

        return 0;
}

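/*
 * Halt the MSS Q6, modem and NC AXI ports, shut down the Q6, release
 * clocks and power, and place the subsystem back into reset. If the
 * modem was never booted, power and clocks are enabled first so the
 * shutdown register writes can take effect.
 */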
static int pil_mss_shutdown(struct pil_desc *pil)
{
        struct q6v5_data *drv = dev_get_drvdata(pil->dev);

        pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_Q6_HALT_BASE);
        pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_MODEM_HALT_BASE);
        pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_NC_HALT_BASE);

        /*
         * If the shutdown function is called before the reset function, clocks
         * and power will not be enabled yet. Enable them here so that register
         * writes performed during the shutdown succeed.
         */
        if (!drv->is_booted) {
                pil_mss_power_up(pil->dev);
                pil_mss_enable_clks(drv);
        }
        pil_q6v5_shutdown(pil);

        pil_mss_disable_clks(drv);
        pil_mss_power_down(pil->dev);

        writel_relaxed(1, drv->restart_reg);

        drv->is_booted = false;

        return 0;
}

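/*
 * Bring the modem subsystem up: release the restart register, enable
 * power and clocks, program either the MBA image address (for
 * self-authenticating images) or the Q6 reset vector, de-assert the MSS
 * I/O clamps and start the Q6. For self-authenticating images, also wait
 * for the MBA to report readiness before declaring success.
 */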
static int pil_mss_reset(struct pil_desc *pil)
{
        struct q6v5_data *drv = dev_get_drvdata(pil->dev);
        int ret;

        /* Deassert reset to subsystem and wait for propagation */
        writel_relaxed(0, drv->restart_reg);
        mb();
        udelay(2);

        /*
         * Bring subsystem out of reset and enable required
         * regulators and clocks.
         */
        ret = pil_mss_power_up(pil->dev);
        if (ret)
                goto err_power;

        ret = pil_mss_enable_clks(drv);
        if (ret)
                goto err_clks;

        /* Program Image Address */
        if (drv->self_auth) {
                writel_relaxed(drv->start_addr, drv->rmb_base + RMB_MBA_IMAGE);
                /* Ensure write to RMB base occurs before reset is released. */
                mb();
        } else {
                writel_relaxed((drv->start_addr >> 4) & 0x0FFFFFF0,
                                drv->reg_base + QDSP6SS_RST_EVB);
        }

        /* De-assert MSS IO clamps */
        writel_relaxed(MSS_IO_UNCLAMP_ALL, drv->io_clamp_reg);

        ret = pil_q6v5_reset(pil);
        if (ret)
                goto err_q6v5_reset;

        /* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
        if (drv->self_auth) {
                ret = wait_for_mba_ready(pil->dev);
                if (ret)
                        goto err_auth;
        }

        drv->is_booted = true;

        return 0;

err_auth:
        pil_q6v5_shutdown(pil);
err_q6v5_reset:
        pil_mss_disable_clks(drv);
err_clks:
        pil_mss_power_down(pil->dev);
err_power:
        return ret;
}

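/*
 * PIL callbacks: image loading and proxy voting use the shared q6v5
 * helpers; reset and shutdown are MSS-specific.
 */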
static struct pil_reset_ops pil_mss_ops = {
        .init_image = pil_q6v5_init_image,
        .proxy_vote = pil_q6v5_make_proxy_votes,
        .proxy_unvote = pil_q6v5_remove_proxy_votes,
        .auth_and_reset = pil_mss_reset,
        .shutdown = pil_mss_shutdown,
};

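/*
 * Probe: set up the common q6v5 descriptor, map the RMB (for
 * self-authenticating images), restart and I/O clamp registers, acquire
 * the MSS regulator and clocks, then register the peripheral image loader.
 */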
static int __devinit pil_mss_driver_probe(struct platform_device *pdev)
{
        struct q6v5_data *drv;
        struct pil_desc *desc;
        struct resource *res;
        int ret;

        desc = pil_q6v5_init(pdev);
        if (IS_ERR(desc))
                return PTR_ERR(desc);
        drv = platform_get_drvdata(pdev);
        if (drv == NULL)
                return -ENODEV;

        desc->ops = &pil_mss_ops;
        desc->owner = THIS_MODULE;
        desc->proxy_timeout = PROXY_TIMEOUT_MS;

        of_property_read_u32(pdev->dev.of_node, "qcom,pil-self-auth",
                             &drv->self_auth);
        if (drv->self_auth) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
                drv->rmb_base = devm_ioremap(&pdev->dev, res->start,
                                             resource_size(res));
                if (!drv->rmb_base)
                        return -ENOMEM;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
        drv->restart_reg = devm_ioremap(&pdev->dev, res->start,
                                        resource_size(res));
        if (!drv->restart_reg)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
        drv->io_clamp_reg = devm_ioremap(&pdev->dev, res->start,
                                         resource_size(res));
        if (!drv->io_clamp_reg)
                return -ENOMEM;

        drv->vreg = devm_regulator_get(&pdev->dev, "vdd_mss");
        if (IS_ERR(drv->vreg))
                return PTR_ERR(drv->vreg);

        ret = regulator_set_voltage(drv->vreg, 1050000, 1050000);
        if (ret)
                dev_err(&pdev->dev, "Failed to set regulator's voltage.\n");

        ret = regulator_set_optimum_mode(drv->vreg, 100000);
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to set regulator's mode.\n");
                return ret;
        }

        drv->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
        if (IS_ERR(drv->ahb_clk))
                return PTR_ERR(drv->ahb_clk);

        drv->core_clk = devm_clk_get(&pdev->dev, "core_clk");
        if (IS_ERR(drv->core_clk))
                return PTR_ERR(drv->core_clk);

        drv->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
        if (IS_ERR(drv->axi_clk))
                return PTR_ERR(drv->axi_clk);

        drv->reg_clk = devm_clk_get(&pdev->dev, "reg_clk");
        if (IS_ERR(drv->reg_clk))
                return PTR_ERR(drv->reg_clk);

        drv->rom_clk = devm_clk_get(&pdev->dev, "mem_clk");
        if (IS_ERR(drv->rom_clk))
                return PTR_ERR(drv->rom_clk);

        drv->pil = msm_pil_register(desc);
        if (IS_ERR(drv->pil))
                return PTR_ERR(drv->pil);

        return 0;
}

static int __devexit pil_mss_driver_exit(struct platform_device *pdev)
{
        struct q6v5_data *drv = platform_get_drvdata(pdev);
        msm_pil_unregister(drv->pil);
        return 0;
}

static struct of_device_id mss_match_table[] = {
        { .compatible = "qcom,pil-q6v5-mss" },
        {}
};

static struct platform_driver pil_mss_driver = {
        .probe = pil_mss_driver_probe,
        .remove = __devexit_p(pil_mss_driver_exit),
        .driver = {
                .name = "pil-q6v5-mss",
                .of_match_table = mss_match_table,
                .owner = THIS_MODULE,
        },
};

static int __init pil_mss_init(void)
{
        return platform_driver_register(&pil_mss_driver);
}
module_init(pil_mss_init);

static void __exit pil_mss_exit(void)
{
        platform_driver_unregister(&pil_mss_driver);
}
module_exit(pil_mss_exit);

MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors");
MODULE_LICENSE("GPL v2");