/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/elf.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/clk.h>

#include <mach/msm_iomap.h>

#include "peripheral-loader.h"
#include "scm-pas.h"

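/* How long (in ms) the proxy votes are held on RIVA's behalf before being dropped */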
#define PROXY_VOTE_TIMEOUT		10000

#define RIVA_PMU_A2XB_CFG		0xB8
#define RIVA_PMU_A2XB_CFG_EN		BIT(0)

#define RIVA_PMU_CFG			0x28
#define RIVA_PMU_CFG_WARM_BOOT		BIT(0)
#define RIVA_PMU_CFG_IRIS_XO_MODE	0x6
#define RIVA_PMU_CFG_IRIS_XO_MODE_48	(3 << 1)

#define RIVA_PMU_OVRD_EN		0x2C
#define RIVA_PMU_OVRD_EN_CCPU_RESET	BIT(0)
#define RIVA_PMU_OVRD_EN_CCPU_CLK	BIT(1)

#define RIVA_PMU_OVRD_VAL		0x30
#define RIVA_PMU_OVRD_VAL_CCPU_RESET	BIT(0)
#define RIVA_PMU_OVRD_VAL_CCPU_CLK	BIT(1)

#define RIVA_PMU_CCPU_CTL		0x9C
#define RIVA_PMU_CCPU_CTL_HIGH_IVT	BIT(0)
#define RIVA_PMU_CCPU_CTL_REMAP_EN	BIT(2)

#define RIVA_PMU_CCPU_BOOT_REMAP_ADDR	0xA0

#define RIVA_PLL_MODE			(MSM_CLK_CTL_BASE + 0x31A0)
#define PLL_MODE_OUTCTRL		BIT(0)
#define PLL_MODE_BYPASSNL		BIT(1)
#define PLL_MODE_RESET_N		BIT(2)
#define PLL_MODE_REF_XO_SEL		0x30
#define PLL_MODE_REF_XO_SEL_CXO		(2 << 4)
#define PLL_MODE_REF_XO_SEL_RF		(3 << 4)
#define RIVA_PLL_L_VAL			(MSM_CLK_CTL_BASE + 0x31A4)
#define RIVA_PLL_M_VAL			(MSM_CLK_CTL_BASE + 0x31A8)
#define RIVA_PLL_N_VAL			(MSM_CLK_CTL_BASE + 0x31AC)
#define RIVA_PLL_CONFIG			(MSM_CLK_CTL_BASE + 0x31B4)
#define RIVA_PLL_STATUS			(MSM_CLK_CTL_BASE + 0x31B8)
#define RIVA_RESET			(MSM_CLK_CTL_BASE + 0x35E0)

#define RIVA_PMU_ROOT_CLK_SEL		0xC8
#define RIVA_PMU_ROOT_CLK_SEL_3		BIT(2)

#define RIVA_PMU_CLK_ROOT3		0x78
#define RIVA_PMU_CLK_ROOT3_ENA		BIT(0)
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV	0x3C
#define RIVA_PMU_CLK_ROOT3_SRC0_DIV_2	(1 << 2)
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL	0x1C0
#define RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA	(1 << 6)
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV	0x1E00
#define RIVA_PMU_CLK_ROOT3_SRC1_DIV_2	(1 << 9)
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL	0xE000
#define RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA	(1 << 13)

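/**
 * struct riva_data - driver state for the RIVA peripheral loader
 * @base:       mapped RIVA PMU register space
 * @start_addr: firmware entry point taken from the ELF header
 * @xo:         CXO clock handle
 * @use_cxo:    true if CXO must be kept enabled while RIVA boots
 * @work:       delayed work that drops the proxy votes
 * @pll_supply: "pll_vdd" regulator supplying the RIVA PLL
 */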
struct riva_data {
	void __iomem *base;
	unsigned long start_addr;
	struct clk *xo;
	bool use_cxo;
	struct delayed_work work;
	struct regulator *pll_supply;
};

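/*
 * Proxy-vote for the resources RIVA needs while booting: the PLL supply and,
 * when required, the CXO clock. The votes are dropped by delayed work after
 * PROXY_VOTE_TIMEOUT ms, or sooner if the work is flushed at shutdown.
 */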
static int pil_riva_make_proxy_votes(struct device *dev)
{
	struct riva_data *drv = dev_get_drvdata(dev);
	int ret;

	ret = regulator_enable(drv->pll_supply);
	if (ret) {
		dev_err(dev, "failed to enable pll supply\n");
		goto err;
	}
	if (drv->use_cxo) {
		ret = clk_prepare_enable(drv->xo);
		if (ret) {
			dev_err(dev, "failed to enable xo\n");
			goto err_clk;
		}
	}
	schedule_delayed_work(&drv->work, msecs_to_jiffies(PROXY_VOTE_TIMEOUT));
	return 0;
err_clk:
	regulator_disable(drv->pll_supply);
err:
	return ret;
}

static void pil_riva_remove_proxy_votes(struct work_struct *work)
{
	struct riva_data *drv = container_of(work, struct riva_data, work.work);
	regulator_disable(drv->pll_supply);
	if (drv->use_cxo)
		clk_disable_unprepare(drv->xo);
}

static void pil_riva_remove_proxy_votes_now(struct device *dev)
{
	struct riva_data *drv = dev_get_drvdata(dev);
	flush_delayed_work(&drv->work);
}

static int nop_verify_blob(struct pil_desc *pil, u32 phy_addr, size_t size)
{
	return 0;
}

static int pil_riva_init_image(struct pil_desc *pil, const u8 *metadata,
		size_t size)
{
	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
	struct riva_data *drv = dev_get_drvdata(pil->dev);
	drv->start_addr = ehdr->e_entry;
	return 0;
}

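/* CXO must stay on as the PLL reference unless the IRIS XO is in 48 MHz mode */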
static bool cxo_is_needed(struct riva_data *drv)
{
	u32 reg = readl_relaxed(drv->base + RIVA_PMU_CFG);
	return (reg & RIVA_PMU_CFG_IRIS_XO_MODE)
			!= RIVA_PMU_CFG_IRIS_XO_MODE_48;
}

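/*
 * Non-secure boot: enable the A2XB bridge, take the proxy votes, program
 * PLL 13 to 960 MHz, configure the cCPU clock for 240 MHz, remap the boot
 * vector to the firmware entry point and release the cCPU from reset.
 */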
static int pil_riva_reset(struct pil_desc *pil)
{
	u32 reg, sel;
	struct riva_data *drv = dev_get_drvdata(pil->dev);
	void __iomem *base = drv->base;
	unsigned long start_addr = drv->start_addr;
	int ret;

	ret = clk_prepare_enable(drv->xo);
	if (ret)
		return ret;
	/* Enable A2XB bridge */
	reg = readl_relaxed(base + RIVA_PMU_A2XB_CFG);
	reg |= RIVA_PMU_A2XB_CFG_EN;
	writel_relaxed(reg, base + RIVA_PMU_A2XB_CFG);

	drv->use_cxo = cxo_is_needed(drv);
	ret = pil_riva_make_proxy_votes(pil->dev);
	if (ret) {
		reg &= ~RIVA_PMU_A2XB_CFG_EN;
		writel_relaxed(reg, base + RIVA_PMU_A2XB_CFG);
		mb();
		clk_disable_unprepare(drv->xo);
		return ret;
	}

	/* Program PLL 13 to 960 MHz */
	reg = readl_relaxed(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_BYPASSNL | PLL_MODE_OUTCTRL | PLL_MODE_RESET_N);
	writel_relaxed(reg, RIVA_PLL_MODE);

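	/*
	 * L = 50 when CXO (19.2 MHz) is the reference, L = 40 otherwise;
	 * both presumably target the 960 MHz noted above.
	 */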
	if (drv->use_cxo)
		writel_relaxed(0x40000C00 | 50, RIVA_PLL_L_VAL);
	else
		writel_relaxed(0x40000C00 | 40, RIVA_PLL_L_VAL);
	writel_relaxed(0, RIVA_PLL_M_VAL);
	writel_relaxed(1, RIVA_PLL_N_VAL);
	writel_relaxed(0x01495227, RIVA_PLL_CONFIG);

	reg = readl_relaxed(RIVA_PLL_MODE);
	reg &= ~(PLL_MODE_REF_XO_SEL);
	reg |= drv->use_cxo ? PLL_MODE_REF_XO_SEL_CXO : PLL_MODE_REF_XO_SEL_RF;
	writel_relaxed(reg, RIVA_PLL_MODE);

	/* Enable PLL 13 */
	reg |= PLL_MODE_BYPASSNL;
	writel_relaxed(reg, RIVA_PLL_MODE);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	usleep_range(10, 20);

	reg |= PLL_MODE_RESET_N;
	writel_relaxed(reg, RIVA_PLL_MODE);
	reg |= PLL_MODE_OUTCTRL;
	writel_relaxed(reg, RIVA_PLL_MODE);

	/* Wait for PLL to settle */
	mb();
	usleep_range(50, 100);

	/* Configure cCPU for 240 MHz */
	sel = readl_relaxed(base + RIVA_PMU_ROOT_CLK_SEL);
	reg = readl_relaxed(base + RIVA_PMU_CLK_ROOT3);
	if (sel & RIVA_PMU_ROOT_CLK_SEL_3) {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC0_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC0_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC0_DIV_2;
	} else {
		reg &= ~(RIVA_PMU_CLK_ROOT3_SRC1_SEL |
			 RIVA_PMU_CLK_ROOT3_SRC1_DIV);
		reg |= RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA |
		       RIVA_PMU_CLK_ROOT3_SRC1_DIV_2;
	}
	writel_relaxed(reg, base + RIVA_PMU_CLK_ROOT3);
	reg |= RIVA_PMU_CLK_ROOT3_ENA;
	writel_relaxed(reg, base + RIVA_PMU_CLK_ROOT3);
	reg = readl_relaxed(base + RIVA_PMU_ROOT_CLK_SEL);
	reg ^= RIVA_PMU_ROOT_CLK_SEL_3;
	writel_relaxed(reg, base + RIVA_PMU_ROOT_CLK_SEL);

	/* Use the high vector table */
	reg = readl_relaxed(base + RIVA_PMU_CCPU_CTL);
	reg |= RIVA_PMU_CCPU_CTL_HIGH_IVT | RIVA_PMU_CCPU_CTL_REMAP_EN;
	writel_relaxed(reg, base + RIVA_PMU_CCPU_CTL);

	/* Set base memory address */
	writel_relaxed(start_addr >> 16, base + RIVA_PMU_CCPU_BOOT_REMAP_ADDR);

	/* Clear warmboot bit indicating this is a cold boot */
	reg = readl_relaxed(base + RIVA_PMU_CFG);
	reg &= ~(RIVA_PMU_CFG_WARM_BOOT);
	writel_relaxed(reg, base + RIVA_PMU_CFG);

	/* Enable the cCPU clock */
	reg = readl_relaxed(base + RIVA_PMU_OVRD_VAL);
	reg |= RIVA_PMU_OVRD_VAL_CCPU_CLK;
	writel_relaxed(reg, base + RIVA_PMU_OVRD_VAL);

	/* Take cCPU out of reset */
	reg |= RIVA_PMU_OVRD_VAL_CCPU_RESET;
	writel_relaxed(reg, base + RIVA_PMU_OVRD_VAL);
	clk_disable_unprepare(drv->xo);

	return 0;
}

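/*
 * Non-secure shutdown: hold the cCPU and its clock in reset via the PMU
 * overrides, pulse the RIVA reset line, then drop any outstanding proxy votes.
 */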
static int pil_riva_shutdown(struct pil_desc *pil)
{
	struct riva_data *drv = dev_get_drvdata(pil->dev);
	u32 reg;
	int ret;

	ret = clk_prepare_enable(drv->xo);
	if (ret)
		return ret;
	/* Put cCPU and cCPU clock into reset */
	reg = readl_relaxed(drv->base + RIVA_PMU_OVRD_VAL);
	reg &= ~(RIVA_PMU_OVRD_VAL_CCPU_RESET | RIVA_PMU_OVRD_VAL_CCPU_CLK);
	writel_relaxed(reg, drv->base + RIVA_PMU_OVRD_VAL);
	reg = readl_relaxed(drv->base + RIVA_PMU_OVRD_EN);
	reg |= RIVA_PMU_OVRD_EN_CCPU_RESET | RIVA_PMU_OVRD_EN_CCPU_CLK;
	writel_relaxed(reg, drv->base + RIVA_PMU_OVRD_EN);
	mb();

	/* Assert reset to Riva */
	writel_relaxed(1, RIVA_RESET);
	mb();
	usleep_range(1000, 2000);

	/* Deassert reset to Riva */
	writel_relaxed(0, RIVA_RESET);
	mb();

	clk_disable_unprepare(drv->xo);
	pil_riva_remove_proxy_votes_now(pil->dev);

	return 0;
}

static struct pil_reset_ops pil_riva_ops = {
	.init_image = pil_riva_init_image,
	.verify_blob = nop_verify_blob,
	.auth_and_reset = pil_riva_reset,
	.shutdown = pil_riva_shutdown,
};

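/*
 * Trusted (secure) boot path: image authentication, reset and shutdown are
 * delegated to the secure environment through the SCM PAS interface.
 */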
static int pil_riva_init_image_trusted(struct pil_desc *pil,
		const u8 *metadata, size_t size)
{
	return pas_init_image(PAS_RIVA, metadata, size);
}

static int pil_riva_reset_trusted(struct pil_desc *pil)
{
	struct riva_data *drv = dev_get_drvdata(pil->dev);
	int ret;

	ret = clk_prepare_enable(drv->xo);
	if (ret)
		return ret;
	/* Proxy-vote for resources RIVA needs */
	ret = pil_riva_make_proxy_votes(pil->dev);
	if (!ret)
		ret = pas_auth_and_reset(PAS_RIVA);
	clk_disable_unprepare(drv->xo);
	return ret;
}

static int pil_riva_shutdown_trusted(struct pil_desc *pil)
{
	int ret;
	struct riva_data *drv = dev_get_drvdata(pil->dev);

	ret = clk_prepare_enable(drv->xo);
	if (ret)
		return ret;
	ret = pas_shutdown(PAS_RIVA);
	pil_riva_remove_proxy_votes_now(pil->dev);
	clk_disable_unprepare(drv->xo);

	return ret;
}

static struct pil_reset_ops pil_riva_ops_trusted = {
	.init_image = pil_riva_init_image_trusted,
	.verify_blob = nop_verify_blob,
	.auth_and_reset = pil_riva_reset_trusted,
	.shutdown = pil_riva_shutdown_trusted,
};

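/* Map RIVA's registers, set up the PLL supply and CXO clock, then register with the PIL framework */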
static int __devinit pil_riva_probe(struct platform_device *pdev)
{
	struct riva_data *drv;
	struct resource *res;
	struct pil_desc *desc;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;
	platform_set_drvdata(pdev, drv);

	drv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!drv->base)
		return -ENOMEM;

	desc = devm_kzalloc(&pdev->dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drv->pll_supply = regulator_get(&pdev->dev, "pll_vdd");
	if (IS_ERR(drv->pll_supply)) {
		dev_err(&pdev->dev, "failed to get pll supply\n");
		return PTR_ERR(drv->pll_supply);
	}
	if (regulator_count_voltages(drv->pll_supply) > 0) {
		ret = regulator_set_voltage(drv->pll_supply, 1800000, 1800000);
		if (ret) {
			dev_err(&pdev->dev,
					"failed to set pll supply voltage\n");
			goto err;
		}

		ret = regulator_set_optimum_mode(drv->pll_supply, 100000);
		if (ret < 0) {
			dev_err(&pdev->dev,
					"failed to set pll supply optimum mode\n");
			goto err;
		}
	}

	desc->name = "wcnss";
	desc->dev = &pdev->dev;

	if (pas_supported(PAS_RIVA) > 0) {
		desc->ops = &pil_riva_ops_trusted;
		dev_info(&pdev->dev, "using secure boot\n");
	} else {
		desc->ops = &pil_riva_ops;
		dev_info(&pdev->dev, "using non-secure boot\n");
	}

	drv->xo = clk_get(&pdev->dev, "cxo");
	if (IS_ERR(drv->xo)) {
		ret = PTR_ERR(drv->xo);
		goto err;
	}
	INIT_DELAYED_WORK(&drv->work, pil_riva_remove_proxy_votes);

	ret = msm_pil_register(desc);
	if (ret)
		goto err_register;
	return 0;
err_register:
	flush_delayed_work_sync(&drv->work);
	clk_put(drv->xo);
err:
	regulator_put(drv->pll_supply);
	return ret;
}

static int __devexit pil_riva_remove(struct platform_device *pdev)
{
	struct riva_data *drv = platform_get_drvdata(pdev);
	flush_delayed_work_sync(&drv->work);
	clk_put(drv->xo);
	regulator_put(drv->pll_supply);
	return 0;
}

static struct platform_driver pil_riva_driver = {
	.probe = pil_riva_probe,
	.remove = __devexit_p(pil_riva_remove),
	.driver = {
		.name = "pil_riva",
		.owner = THIS_MODULE,
	},
};

static int __init pil_riva_init(void)
{
	return platform_driver_register(&pil_riva_driver);
}
module_init(pil_riva_init);

static void __exit pil_riva_exit(void)
{
	platform_driver_unregister(&pil_riva_driver);
}
module_exit(pil_riva_exit);

MODULE_DESCRIPTION("Support for booting RIVA (WCNSS) processors");
MODULE_LICENSE("GPL v2");