/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/elf.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

#include <mach/msm_iomap.h>
#include <mach/msm_xo.h>

#include "peripheral-loader.h"
#include "scm-pas.h"

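/* How long (in ms) to hold the proxy vote for CXO after bringing RIVA up */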
#define PROXY_VOTE_TIMEOUT			10000

#define RIVA_PMU_A2XB_CFG			0xB8
#define RIVA_PMU_A2XB_CFG_EN			BIT(0)

#define RIVA_PMU_CFG				0x28
#define RIVA_PMU_CFG_WARM_BOOT			BIT(0)
#define RIVA_PMU_CFG_IRIS_XO_MODE		0x6
#define RIVA_PMU_CFG_IRIS_XO_MODE_48		(3 << 1)

#define RIVA_PMU_OVRD_VAL			0x30
#define RIVA_PMU_OVRD_VAL_CCPU_RESET		BIT(0)
#define RIVA_PMU_OVRD_VAL_CCPU_CLK		BIT(1)

#define RIVA_PMU_CCPU_CTL			0x9C
#define RIVA_PMU_CCPU_CTL_HIGH_IVT		BIT(0)
#define RIVA_PMU_CCPU_CTL_REMAP_EN		BIT(2)

#define RIVA_PMU_CCPU_BOOT_REMAP_ADDR		0xA0

#define RIVA_PLL_MODE				(MSM_CLK_CTL_BASE + 0x31A0)
#define PLL_MODE_OUTCTRL			BIT(0)
#define PLL_MODE_BYPASSNL			BIT(1)
#define PLL_MODE_RESET_N			BIT(2)
#define PLL_MODE_REF_XO_SEL			0x30
#define PLL_MODE_REF_XO_SEL_CXO			(2 << 4)
#define PLL_MODE_REF_XO_SEL_RF			(3 << 4)
#define RIVA_PLL_L_VAL				(MSM_CLK_CTL_BASE + 0x31A4)
#define RIVA_PLL_M_VAL				(MSM_CLK_CTL_BASE + 0x31A8)
#define RIVA_PLL_N_VAL				(MSM_CLK_CTL_BASE + 0x31AC)
#define RIVA_PLL_CONFIG				(MSM_CLK_CTL_BASE + 0x31B4)
#define RIVA_PLL_STATUS				(MSM_CLK_CTL_BASE + 0x31B8)

#define RIVA_PMU_ROOT_CLK_SEL			0xC8
#define RIVA_PMU_ROOT_CLK_SEL_3			BIT(2)

#define RIVA_PMU_CLK_ROOT3			0x78
#define RIVA_PMU_CLK_ROOT3_ENA			BIT(0)
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV		0x3C
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV_2		(1 << 2)
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL		0x1C0
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA	(1 << 6)
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV		0x1E00
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV_2		(1 << 9)
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL		0xE000
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA	(1 << 13)

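/* Driver state for one RIVA (WCNSS) instance */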
struct riva_data {
	void __iomem *base;
	unsigned long start_addr;
	struct msm_xo_voter *xo;
	struct timer_list xo_timer;
};

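/* Vote for CXO on RIVA's behalf and arm the timer that later drops the vote */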
static void pil_riva_make_xo_proxy_votes(struct device *dev)
{
	struct riva_data *drv = dev_get_drvdata(dev);

	msm_xo_mode_vote(drv->xo, MSM_XO_MODE_ON);
	mod_timer(&drv->xo_timer, jiffies+msecs_to_jiffies(PROXY_VOTE_TIMEOUT));
}

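/* Timer callback: drop the proxy CXO vote */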
static void pil_riva_remove_xo_proxy_votes(unsigned long data)
{
	struct riva_data *drv = (struct riva_data *)data;

	msm_xo_mode_vote(drv->xo, MSM_XO_MODE_OFF);
}

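/* Drop the proxy CXO vote immediately if the removal timer is still pending */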
static void pil_riva_remove_xo_proxy_votes_now(struct device *dev)
{
	struct riva_data *drv = dev_get_drvdata(dev);

	if (del_timer(&drv->xo_timer))
		pil_riva_remove_xo_proxy_votes((unsigned long)drv);
}

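/* CXO is only needed when the IRIS XO mode field is not set to the 48 MHz mode */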
static bool cxo_is_needed(struct riva_data *drv)
{
	u32 reg = readl_relaxed(drv->base + RIVA_PMU_CFG);
	return (reg & RIVA_PMU_CFG_IRIS_XO_MODE)
		!= RIVA_PMU_CFG_IRIS_XO_MODE_48;
}

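/* Per-blob verification is not required for RIVA */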
static int nop_verify_blob(struct pil_desc *pil, u32 phy_addr, size_t size)
{
	return 0;
}

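/* Stash the ELF entry point; it is later used as the cCPU boot remap address */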
static int pil_riva_init_image(struct pil_desc *pil, const u8 *metadata,
		size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	struct riva_data *drv = dev_get_drvdata(pil->dev);
	drv->start_addr = ehdr->e_entry;
	return 0;
}

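/*
 * Non-secure boot: enable the A2XB bridge, program PLL 13 and the cCPU
 * clock, then release the cCPU from reset.
 */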
static int pil_riva_reset(struct pil_desc *pil)
{
	u32 reg, sel;
	bool use_cxo;
	struct riva_data *drv = dev_get_drvdata(pil->dev);
	void __iomem *base = drv->base;
	unsigned long start_addr = drv->start_addr;

	/* Enable A2XB bridge */
	reg = readl_relaxed(base + RIVA_PMU_A2XB_CFG);
	reg |= RIVA_PMU_A2XB_CFG_EN;
	writel_relaxed(reg, base + RIVA_PMU_A2XB_CFG);

	/* Proxy-vote for CXO if it's needed */
	use_cxo = cxo_is_needed(drv);
	if (use_cxo)
		pil_riva_make_xo_proxy_votes(pil->dev);

	/* Program PLL 13 to 960 MHz */
	reg = readl_relaxed(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_BYPASSNL | PLL_MODE_OUTCTRL | PLL_MODE_RESET_N);
	writel_relaxed(reg, RIVA_PLL_MODE);

	if (use_cxo)
		writel_relaxed(0x40000C00 | 50, RIVA_PLL_L_VAL);
	else
		writel_relaxed(0x40000C00 | 40, RIVA_PLL_L_VAL);
	writel_relaxed(0, RIVA_PLL_M_VAL);
	writel_relaxed(1, RIVA_PLL_N_VAL);
	writel_relaxed(0x01495227, RIVA_PLL_CONFIG);

	reg = readl_relaxed(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_REF_XO_SEL);
	reg |= use_cxo ? PLL_MODE_REF_XO_SEL_CXO : PLL_MODE_REF_XO_SEL_RF;
	writel_relaxed(reg, RIVA_PLL_MODE);

	/* Enable PLL 13 */
	reg |= PLL_MODE_BYPASSNL;
	writel_relaxed(reg, RIVA_PLL_MODE);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	usleep_range(10, 20);

	reg |= PLL_MODE_RESET_N;
	writel_relaxed(reg, RIVA_PLL_MODE);
	reg |= PLL_MODE_OUTCTRL;
	writel_relaxed(reg, RIVA_PLL_MODE);

	/* Wait for PLL to settle */
	mb();
	usleep_range(50, 100);

	/* Configure cCPU for 240 MHz */
	sel = readl_relaxed(base + RIVA_PMU_ROOT_CLK_SEL);
	reg = readl_relaxed(base + RIVA_PMU_CLK_ROOT3);
	if (sel & RIVA_PMU_ROOT_CLK_SEL_3) {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC0_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC0_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC0_DIV_2;
	} else {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC1_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC1_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC1_DIV_2;
	}
	writel_relaxed(reg, base + RIVA_PMU_CLK_ROOT3);
	reg |= RIVA_PMU_CLK_ROOT3_ENA;
	writel_relaxed(reg, base + RIVA_PMU_CLK_ROOT3);
	reg = readl_relaxed(base + RIVA_PMU_ROOT_CLK_SEL);
	reg ^= RIVA_PMU_ROOT_CLK_SEL_3;
	writel_relaxed(reg, base + RIVA_PMU_ROOT_CLK_SEL);

	/* Use the high vector table */
	reg = readl_relaxed(base + RIVA_PMU_CCPU_CTL);
	reg |= RIVA_PMU_CCPU_CTL_HIGH_IVT | RIVA_PMU_CCPU_CTL_REMAP_EN;
	writel_relaxed(reg, base + RIVA_PMU_CCPU_CTL);

	/* Set base memory address */
	writel_relaxed(start_addr >> 16, base + RIVA_PMU_CCPU_BOOT_REMAP_ADDR);

	/* Clear warmboot bit indicating this is a cold boot */
	reg = readl_relaxed(base + RIVA_PMU_CFG);
	reg &= ~(RIVA_PMU_CFG_WARM_BOOT);
	writel_relaxed(reg, base + RIVA_PMU_CFG);

	/* Enable the cCPU clock */
	reg = readl_relaxed(base + RIVA_PMU_OVRD_VAL);
	reg |= RIVA_PMU_OVRD_VAL_CCPU_CLK;
	writel_relaxed(reg, base + RIVA_PMU_OVRD_VAL);

	/* Take cCPU out of reset */
	reg |= RIVA_PMU_OVRD_VAL_CCPU_RESET;
	writel_relaxed(reg, base + RIVA_PMU_OVRD_VAL);

	return 0;
}

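/* Put the cCPU back into reset, gate its clock and drop any proxy CXO vote */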
static int pil_riva_shutdown(struct pil_desc *pil)
{
	struct riva_data *drv = dev_get_drvdata(pil->dev);
	u32 reg;

	reg = readl_relaxed(drv->base + RIVA_PMU_OVRD_VAL);
	reg &= ~(RIVA_PMU_OVRD_VAL_CCPU_RESET | RIVA_PMU_OVRD_VAL_CCPU_CLK);
	writel_relaxed(reg, drv->base + RIVA_PMU_OVRD_VAL);

	pil_riva_remove_xo_proxy_votes_now(pil->dev);

	return 0;
}

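/* Non-secure boot: the kernel programs the RIVA PMU and PLL directly */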
static struct pil_reset_ops pil_riva_ops = {
	.init_image = pil_riva_init_image,
	.verify_blob = nop_verify_blob,
	.auth_and_reset = pil_riva_reset,
	.shutdown = pil_riva_shutdown,
};

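/* Secure boot: image authentication, reset and shutdown go through SCM (PAS) */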
static int pil_riva_init_image_trusted(struct pil_desc *pil,
		const u8 *metadata, size_t size)
{
	return pas_init_image(PAS_RIVA, metadata, size);
}

static int pil_riva_reset_trusted(struct pil_desc *pil)
{
	struct riva_data *drv = dev_get_drvdata(pil->dev);

	/* Proxy-vote for CXO if it's needed */
	if (cxo_is_needed(drv))
		pil_riva_make_xo_proxy_votes(pil->dev);

	return pas_auth_and_reset(PAS_RIVA);
}

static int pil_riva_shutdown_trusted(struct pil_desc *pil)
{
	int ret = pas_shutdown(PAS_RIVA);

	pil_riva_remove_xo_proxy_votes_now(pil->dev);

	return ret;
}

static struct pil_reset_ops pil_riva_ops_trusted = {
	.init_image = pil_riva_init_image_trusted,
	.verify_blob = nop_verify_blob,
	.auth_and_reset = pil_riva_reset_trusted,
	.shutdown = pil_riva_shutdown_trusted,
};

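/*
 * Map the PMU registers, pick secure or non-secure ops based on PAS support,
 * and register with the peripheral loader framework.
 */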
static int __devinit pil_riva_probe(struct platform_device *pdev)
{
	struct riva_data *drv;
	struct resource *res;
	struct pil_desc *desc;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;
	platform_set_drvdata(pdev, drv);

	drv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!drv->base)
		return -ENOMEM;

	desc = devm_kzalloc(&pdev->dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	desc->name = "wcnss";
	desc->dev = &pdev->dev;

	if (pas_supported(PAS_RIVA) > 0) {
		desc->ops = &pil_riva_ops_trusted;
		dev_info(&pdev->dev, "using secure boot\n");
	} else {
		desc->ops = &pil_riva_ops;
		dev_info(&pdev->dev, "using non-secure boot\n");
	}

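	/* Set up the proxy-vote removal timer and get a handle on CXO */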
	setup_timer(&drv->xo_timer, pil_riva_remove_xo_proxy_votes,
		    (unsigned long)drv);
	drv->xo = msm_xo_get(MSM_XO_CXO, desc->name);
	if (IS_ERR(drv->xo))
		return PTR_ERR(drv->xo);

	return msm_pil_register(desc);
}

static int __devexit pil_riva_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver pil_riva_driver = {
	.probe = pil_riva_probe,
	.remove = __devexit_p(pil_riva_remove),
	.driver = {
		.name = "pil_riva",
		.owner = THIS_MODULE,
	},
};

static int __init pil_riva_init(void)
{
	return platform_driver_register(&pil_riva_driver);
}
module_init(pil_riva_init);

static void __exit pil_riva_exit(void)
{
	platform_driver_unregister(&pil_riva_driver);
}
module_exit(pil_riva_exit);

MODULE_DESCRIPTION("Support for booting RIVA (WCNSS) processors");
MODULE_LICENSE("GPL v2");