/* linux/arch/arm/mach-msm/dma.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2008-2010, 2012 Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <mach/dma.h>

#define MODULE_NAME "msm_dmov"

#define MSM_DMOV_CHANNEL_COUNT 16
#define MSM_DMOV_CRCI_COUNT 16

enum {
	CLK_DIS,
	CLK_TO_BE_DIS,
	CLK_EN
};

struct msm_dmov_ci_conf {
	int start;
	int end;
	int burst;
};

struct msm_dmov_crci_conf {
	int sd;
	int blk_size;
};

struct msm_dmov_chan_conf {
	int sd;
	int block;
	int priority;
};

struct msm_dmov_conf {
	void *base;
	struct msm_dmov_crci_conf *crci_conf;
	struct msm_dmov_chan_conf *chan_conf;
	int channel_active;
	int sd;
	size_t sd_size;
	struct list_head staged_commands[MSM_DMOV_CHANNEL_COUNT];
	struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];
	struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];
	struct mutex lock;
	spinlock_t list_lock;
	unsigned int irq;
	struct clk *clk;
	struct clk *pclk;
	struct clk *ebiclk;
	unsigned int clk_ctl;
	struct delayed_work work;
	struct workqueue_struct *cmd_wq;
};

static void msm_dmov_clock_work(struct work_struct *);
static int msm_dmov_clk_toggle(int, int);

#ifdef CONFIG_ARCH_MSM8X60

#define DMOV_CHANNEL_DEFAULT_CONF { .sd = 1, .block = 0, .priority = 0 }
#define DMOV_CHANNEL_MODEM_CONF { .sd = 3, .block = 0, .priority = 0 }
#define DMOV_CHANNEL_CONF(secd, blk, pri) \
	{ .sd = secd, .block = blk, .priority = pri }

static struct msm_dmov_chan_conf adm0_chan_conf[] = {
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
};

static struct msm_dmov_chan_conf adm1_chan_conf[] = {
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
};

#define DMOV_CRCI_DEFAULT_CONF { .sd = 1, .blk_size = 0 }
#define DMOV_CRCI_CONF(secd, blk) { .sd = secd, .blk_size = blk }

static struct msm_dmov_crci_conf adm0_crci_conf[] = {
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 4),
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
};

static struct msm_dmov_crci_conf adm1_crci_conf[] = {
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_DEFAULT_CONF,
};

static struct msm_dmov_conf dmov_conf[] = {
	{
		.crci_conf = adm0_crci_conf,
		.chan_conf = adm0_chan_conf,
		.lock = __MUTEX_INITIALIZER(dmov_conf[0].lock),
		.list_lock = __SPIN_LOCK_UNLOCKED(dmov_list_lock),
		.clk_ctl = CLK_DIS,
		.work = __DELAYED_WORK_INITIALIZER(dmov_conf[0].work,
						   msm_dmov_clock_work),
	}, {
		.crci_conf = adm1_crci_conf,
		.chan_conf = adm1_chan_conf,
		.lock = __MUTEX_INITIALIZER(dmov_conf[1].lock),
		.list_lock = __SPIN_LOCK_UNLOCKED(dmov_list_lock),
		.clk_ctl = CLK_DIS,
		.work = __DELAYED_WORK_INITIALIZER(dmov_conf[1].work,
						   msm_dmov_clock_work),
	}
};
#else
static struct msm_dmov_conf dmov_conf[] = {
	{
		.crci_conf = NULL,
		.chan_conf = NULL,
		.lock = __MUTEX_INITIALIZER(dmov_conf[0].lock),
		.list_lock = __SPIN_LOCK_UNLOCKED(dmov_list_lock),
		.clk_ctl = CLK_DIS,
		.work = __DELAYED_WORK_INITIALIZER(dmov_conf[0].work,
						   msm_dmov_clock_work),
	}
};
#endif

#define MSM_DMOV_ID_COUNT (MSM_DMOV_CHANNEL_COUNT * ARRAY_SIZE(dmov_conf))
#define DMOV_REG(name, adm) ((name) + (dmov_conf[adm].base) + \
			     (dmov_conf[adm].sd * dmov_conf[adm].sd_size))
#define DMOV_ID_TO_ADM(id) ((id) / MSM_DMOV_CHANNEL_COUNT)
#define DMOV_ID_TO_CHAN(id) ((id) % MSM_DMOV_CHANNEL_COUNT)
#define DMOV_CHAN_ADM_TO_ID(ch, adm) ((ch) + (adm) * MSM_DMOV_CHANNEL_COUNT)

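/*
 * Channel ids are flat across ADMs. For example, channel 3 on ADM 1 is
 * id DMOV_CHAN_ADM_TO_ID(3, 1) == 19, and DMOV_ID_TO_ADM(19) and
 * DMOV_ID_TO_CHAN(19) recover the (adm, channel) pair (1, 3).
 */
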
#ifdef CONFIG_MSM_ADM3
#define DMOV_IRQ_TO_ADM(irq) \
({ \
	typeof(irq) _irq = irq; \
	((_irq == INT_ADM1_MASTER) || (_irq == INT_ADM1_AARM)); \
})
#else
#define DMOV_IRQ_TO_ADM(irq) 0
#endif

enum {
	MSM_DMOV_PRINT_ERRORS = 1,
	MSM_DMOV_PRINT_IO = 2,
	MSM_DMOV_PRINT_FLOW = 4
};

unsigned int msm_dmov_print_mask = MSM_DMOV_PRINT_ERRORS;

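/*
 * msm_dmov_print_mask is an OR of the MSM_DMOV_PRINT_* flags above.
 * For example, a debug build could set it to
 * (MSM_DMOV_PRINT_ERRORS | MSM_DMOV_PRINT_FLOW) to also enable the
 * flow tracing emitted by the PRINT_FLOW() calls below.
 */
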
#define MSM_DMOV_DPRINTF(mask, format, args...) \
	do { \
		if ((mask) & msm_dmov_print_mask) \
			printk(KERN_ERR format, args); \
	} while (0)
#define PRINT_ERROR(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_ERRORS, format, args);
#define PRINT_IO(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_IO, format, args);
#define PRINT_FLOW(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_FLOW, format, args);

static int msm_dmov_clk_toggle(int adm, int on)
{
	int ret = 0;

	if (on) {
		ret = clk_enable(dmov_conf[adm].clk);
		if (ret)
			goto err;
		if (dmov_conf[adm].pclk) {
			ret = clk_enable(dmov_conf[adm].pclk);
			if (ret) {
				clk_disable(dmov_conf[adm].clk);
				goto err;
			}
		}
		if (dmov_conf[adm].ebiclk) {
			ret = clk_enable(dmov_conf[adm].ebiclk);
			if (ret) {
				if (dmov_conf[adm].pclk)
					clk_disable(dmov_conf[adm].pclk);
				clk_disable(dmov_conf[adm].clk);
			}
		}
	} else {
		clk_disable(dmov_conf[adm].clk);
		if (dmov_conf[adm].pclk)
			clk_disable(dmov_conf[adm].pclk);
		if (dmov_conf[adm].ebiclk)
			clk_disable(dmov_conf[adm].ebiclk);
	}
err:
	return ret;
}

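/*
 * Clock disable is deferred rather than immediate: the enqueue and ISR
 * paths mark the ADM CLK_TO_BE_DIS and schedule this work one HZ out,
 * so back-to-back transfers do not toggle the clocks on every command.
 */
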
static void msm_dmov_clock_work(struct work_struct *work)
{
	struct msm_dmov_conf *conf =
		container_of(to_delayed_work(work), struct msm_dmov_conf, work);
	int adm = DMOV_IRQ_TO_ADM(conf->irq);
	mutex_lock(&conf->lock);
	if (conf->clk_ctl == CLK_TO_BE_DIS) {
		BUG_ON(conf->channel_active);
		msm_dmov_clk_toggle(adm, 0);
		conf->clk_ctl = CLK_DIS;
	}
	mutex_unlock(&conf->lock);
}

enum {
	NOFLUSH = 0,
	GRACEFUL,
	NONGRACEFUL,
};

/* Caller must hold the list lock */
static struct msm_dmov_cmd *start_ready_cmd(unsigned ch, int adm)
{
	struct msm_dmov_cmd *cmd;

	if (list_empty(&dmov_conf[adm].ready_commands[ch]))
		return NULL;

	cmd = list_entry(dmov_conf[adm].ready_commands[ch].next, typeof(*cmd),
			 list);
	list_del(&cmd->list);
	if (cmd->exec_func)
		cmd->exec_func(cmd);
	list_add_tail(&cmd->list, &dmov_conf[adm].active_commands[ch]);
	if (!dmov_conf[adm].channel_active)
		enable_irq(dmov_conf[adm].irq);
	dmov_conf[adm].channel_active |= BIT(ch);
	PRINT_IO("msm dmov enqueue command, %x, ch %d\n", cmd->cmdptr, ch);
	writel_relaxed(cmd->cmdptr, DMOV_REG(DMOV_CMD_PTR(ch), adm));

	return cmd;
}

static void msm_dmov_enqueue_cmd_ext_work(struct work_struct *work)
{
	struct msm_dmov_cmd *cmd =
		container_of(work, struct msm_dmov_cmd, work);
	unsigned id = cmd->id;
	unsigned status;
	unsigned long flags;
	int adm = DMOV_ID_TO_ADM(id);
	int ch = DMOV_ID_TO_CHAN(id);

	mutex_lock(&dmov_conf[adm].lock);
	if (dmov_conf[adm].clk_ctl == CLK_DIS) {
		status = msm_dmov_clk_toggle(adm, 1);
		if (status != 0)
			goto error;
	}
	dmov_conf[adm].clk_ctl = CLK_EN;

	spin_lock_irqsave(&dmov_conf[adm].list_lock, flags);

	cmd = list_entry(dmov_conf[adm].staged_commands[ch].next, typeof(*cmd),
			 list);
	list_del(&cmd->list);
	list_add_tail(&cmd->list, &dmov_conf[adm].ready_commands[ch]);
	status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
	if (status & DMOV_STATUS_CMD_PTR_RDY) {
		PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n",
			 id, status);
		cmd = start_ready_cmd(ch, adm);
		/*
		 * We added something to the ready list, and still hold the
		 * list lock. Thus, no need to check for cmd == NULL.
		 */
		if (cmd->toflush) {
			int flush = (cmd->toflush == GRACEFUL) ? 1 << 31 : 0;
			writel_relaxed(flush, DMOV_REG(DMOV_FLUSH0(ch), adm));
		}
	} else {
		cmd->toflush = 0;
		if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
		    !list_empty(&dmov_conf[adm].ready_commands[ch]))
			PRINT_ERROR("msm_dmov_enqueue_cmd_ext(%d), stalled, "
				    "status %x\n", id, status);
		PRINT_IO("msm_dmov_enqueue_cmd(%d), enqueue command, status "
			 "%x\n", id, status);
	}
	if (!dmov_conf[adm].channel_active) {
		dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
		schedule_delayed_work(&dmov_conf[adm].work, HZ);
	}
	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);
error:
	mutex_unlock(&dmov_conf[adm].lock);
}

static void __msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
{
	int adm = DMOV_ID_TO_ADM(id);
	int ch = DMOV_ID_TO_CHAN(id);
	unsigned long flags;
	cmd->id = id;
	cmd->toflush = 0;

	spin_lock_irqsave(&dmov_conf[adm].list_lock, flags);
	list_add_tail(&cmd->list, &dmov_conf[adm].staged_commands[ch]);
	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);

	queue_work(dmov_conf[adm].cmd_wq, &cmd->work);
}

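/*
 * Stage a command on the channel encoded in id and kick the per-ADM
 * ordered workqueue, which starts it from process context. A caller
 * (sketch only; names are hypothetical) fills in the command first:
 *
 *	cmd->cmdptr = my_cmdptr;		// hypothetical CMD_PTR value
 *	cmd->complete_func = my_complete;	// hypothetical callback
 *	cmd->exec_func = NULL;
 *	msm_dmov_enqueue_cmd_ext(id, cmd);
 */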
void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
{
	INIT_WORK(&cmd->work, msm_dmov_enqueue_cmd_ext_work);
	__msm_dmov_enqueue_cmd_ext(id, cmd);
}
EXPORT_SYMBOL(msm_dmov_enqueue_cmd_ext);

void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
{
	/* Disable callback function (for backwards compatibility) */
	cmd->exec_func = NULL;
	INIT_WORK(&cmd->work, msm_dmov_enqueue_cmd_ext_work);
	__msm_dmov_enqueue_cmd_ext(id, cmd);
}
EXPORT_SYMBOL(msm_dmov_enqueue_cmd);

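/*
 * Flush channel id: an active command is flushed in hardware right
 * away (graceful selects DMOV_FLUSH_TYPE), while commands still in the
 * staged list are only marked GRACEFUL/NONGRACEFUL and get flushed
 * when they reach the hardware. E.g. msm_dmov_flush(id, 1) requests a
 * graceful flush.
 */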
void msm_dmov_flush(unsigned int id, int graceful)
{
	unsigned long irq_flags;
	int ch = DMOV_ID_TO_CHAN(id);
	int adm = DMOV_ID_TO_ADM(id);
	int flush = graceful ? DMOV_FLUSH_TYPE : 0;
	struct msm_dmov_cmd *cmd;

	spin_lock_irqsave(&dmov_conf[adm].list_lock, irq_flags);
	/* XXX not checking if flush cmd sent already */
	if (!list_empty(&dmov_conf[adm].active_commands[ch])) {
		PRINT_IO("msm_dmov_flush(%d), send flush cmd\n", id);
		writel_relaxed(flush, DMOV_REG(DMOV_FLUSH0(ch), adm));
	}
	list_for_each_entry(cmd, &dmov_conf[adm].staged_commands[ch], list)
		cmd->toflush = graceful ? GRACEFUL : NONGRACEFUL;
	/* spin_unlock_irqrestore has the necessary barrier */
	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, irq_flags);
}
EXPORT_SYMBOL(msm_dmov_flush);

struct msm_dmov_exec_cmdptr_cmd {
	struct msm_dmov_cmd dmov_cmd;
	struct completion complete;
	unsigned id;
	unsigned int result;
	struct msm_dmov_errdata err;
};

static void
dmov_exec_cmdptr_complete_func(struct msm_dmov_cmd *_cmd,
			       unsigned int result,
			       struct msm_dmov_errdata *err)
{
	struct msm_dmov_exec_cmdptr_cmd *cmd =
		container_of(_cmd, struct msm_dmov_exec_cmdptr_cmd, dmov_cmd);
	cmd->result = result;
	if (result != 0x80000002 && err)
		memcpy(&cmd->err, err, sizeof(struct msm_dmov_errdata));

	complete(&cmd->complete);
}

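/*
 * Synchronous wrapper around the enqueue path: builds an on-stack
 * command whose completion callback records the result and error data,
 * then sleeps on the completion. 0x80000002 is the result value this
 * driver expects on success; anything else is reported and turned into
 * -EIO. Illustrative use, assuming cmdptr already points at a mapped
 * command list:
 *
 *	if (msm_dmov_exec_cmd(id, cmdptr))
 *		pr_err("dma transfer failed\n");
 */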
int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
{
	struct msm_dmov_exec_cmdptr_cmd cmd;

	PRINT_FLOW("dmov_exec_cmdptr(%d, %x)\n", id, cmdptr);

	cmd.dmov_cmd.cmdptr = cmdptr;
	cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
	cmd.dmov_cmd.exec_func = NULL;
	cmd.id = id;
	cmd.result = 0;
	INIT_WORK_ONSTACK(&cmd.dmov_cmd.work, msm_dmov_enqueue_cmd_ext_work);
	init_completion(&cmd.complete);

	__msm_dmov_enqueue_cmd_ext(id, &cmd.dmov_cmd);
	wait_for_completion_io(&cmd.complete);

	if (cmd.result != 0x80000002) {
		PRINT_ERROR("dmov_exec_cmdptr(%d): ERROR, result: %x\n",
			    id, cmd.result);
		PRINT_ERROR("dmov_exec_cmdptr(%d): flush: %x %x %x %x\n",
			    id, cmd.err.flush[0], cmd.err.flush[1],
			    cmd.err.flush[2], cmd.err.flush[3]);
		return -EIO;
	}
	PRINT_FLOW("dmov_exec_cmdptr(%d, %x) done\n", id, cmdptr);
	return 0;
}
EXPORT_SYMBOL(msm_dmov_exec_cmd);

static void fill_errdata(struct msm_dmov_errdata *errdata, int ch, int adm)
{
	errdata->flush[0] = readl_relaxed(DMOV_REG(DMOV_FLUSH0(ch), adm));
	errdata->flush[1] = readl_relaxed(DMOV_REG(DMOV_FLUSH1(ch), adm));
	errdata->flush[2] = 0;
	errdata->flush[3] = readl_relaxed(DMOV_REG(DMOV_FLUSH3(ch), adm));
	errdata->flush[4] = readl_relaxed(DMOV_REG(DMOV_FLUSH4(ch), adm));
	errdata->flush[5] = readl_relaxed(DMOV_REG(DMOV_FLUSH5(ch), adm));
}

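/*
 * A single handler services all channels of one ADM: it walks the
 * pending bits of DMOV_ISR, drains every valid result per channel
 * (done, flush and error results each complete the head of the active
 * list), restarts a ready command when CMD_PTR_RDY comes back, and
 * arms the deferred clock shutdown once no channel remains active.
 */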
static irqreturn_t msm_dmov_isr(int irq, void *dev_id)
{
	unsigned int int_status;
	unsigned int mask;
	unsigned int id;
	unsigned int ch;
	unsigned long irq_flags;
	unsigned int ch_status;
	unsigned int ch_result;
	unsigned int valid = 0;
	struct msm_dmov_cmd *cmd;
	int adm = DMOV_IRQ_TO_ADM(irq);

	mutex_lock(&dmov_conf[adm].lock);
	/* read and clear isr */
	int_status = readl_relaxed(DMOV_REG(DMOV_ISR, adm));
	PRINT_FLOW("msm_datamover_irq_handler: DMOV_ISR %x\n", int_status);

	spin_lock_irqsave(&dmov_conf[adm].list_lock, irq_flags);
	while (int_status) {
		mask = int_status & -int_status;
		ch = fls(mask) - 1;
		id = DMOV_CHAN_ADM_TO_ID(ch, adm);
		PRINT_FLOW("msm_datamover_irq_handler %08x %08x id %d\n",
			   int_status, mask, id);
		int_status &= ~mask;
		ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
		if (!(ch_status & DMOV_STATUS_RSLT_VALID)) {
			PRINT_FLOW("msm_datamover_irq_handler id %d, "
				   "result not valid %x\n", id, ch_status);
			continue;
		}
		do {
			valid = 1;
			ch_result = readl_relaxed(DMOV_REG(DMOV_RSLT(ch), adm));
			if (list_empty(&dmov_conf[adm].active_commands[ch])) {
				PRINT_ERROR("msm_datamover_irq_handler id %d, "
					    "got result with no active command, "
					    "status %x, result %x\n",
					    id, ch_status, ch_result);
				cmd = NULL;
			} else {
				cmd = list_entry(dmov_conf[adm].
					active_commands[ch].next, typeof(*cmd),
					list);
			}
			PRINT_FLOW("msm_datamover_irq_handler id %d, status %x, result %x\n",
				   id, ch_status, ch_result);
			if (ch_result & DMOV_RSLT_DONE) {
				PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n",
					   id, ch_status);
				PRINT_IO("msm_datamover_irq_handler id %d, got result "
					 "for %p, result %x\n", id, cmd, ch_result);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, NULL);
				}
			}
			if (ch_result & DMOV_RSLT_FLUSH) {
				struct msm_dmov_errdata errdata;

				fill_errdata(&errdata, ch, adm);
				PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n",
					   id, ch_status);
				PRINT_FLOW("msm_datamover_irq_handler id %d, flush, result %x, flush0 %x\n",
					   id, ch_result, errdata.flush[0]);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, &errdata);
				}
			}
			if (ch_result & DMOV_RSLT_ERROR) {
				struct msm_dmov_errdata errdata;

				fill_errdata(&errdata, ch, adm);

				PRINT_ERROR("msm_datamover_irq_handler id %d, status %x\n",
					    id, ch_status);
				PRINT_ERROR("msm_datamover_irq_handler id %d, error, result %x, flush0 %x\n",
					    id, ch_result, errdata.flush[0]);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, &errdata);
				}
				/*
				 * This does not seem to work: once we get an
				 * error, the datamover will no longer accept
				 * commands.
				 */
				writel_relaxed(0, DMOV_REG(DMOV_FLUSH0(ch),
					       adm));
			}
			rmb();
			ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch),
						  adm));
			PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n",
				   id, ch_status);
			if (ch_status & DMOV_STATUS_CMD_PTR_RDY)
				start_ready_cmd(ch, adm);
		} while (ch_status & DMOV_STATUS_RSLT_VALID);
		if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
		    list_empty(&dmov_conf[adm].ready_commands[ch]))
			dmov_conf[adm].channel_active &= ~(1U << ch);
		PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n",
			   id, ch_status);
	}
	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, irq_flags);

	if (!dmov_conf[adm].channel_active && valid) {
		disable_irq_nosync(dmov_conf[adm].irq);
		dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
		schedule_delayed_work(&dmov_conf[adm].work, HZ);
	}

	mutex_unlock(&dmov_conf[adm].lock);
	return valid ? IRQ_HANDLED : IRQ_NONE;
}

static int msm_dmov_suspend_late(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	int adm = (pdev->id >= 0) ? pdev->id : 0;
	mutex_lock(&dmov_conf[adm].lock);
	if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS) {
		BUG_ON(dmov_conf[adm].channel_active);
		msm_dmov_clk_toggle(adm, 0);
		dmov_conf[adm].clk_ctl = CLK_DIS;
	}
	mutex_unlock(&dmov_conf[adm].lock);
	return 0;
}

static int msm_dmov_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}

static int msm_dmov_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}

static int msm_dmov_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: idling...\n");
	return 0;
}

static struct dev_pm_ops msm_dmov_dev_pm_ops = {
	.runtime_suspend = msm_dmov_runtime_suspend,
	.runtime_resume = msm_dmov_runtime_resume,
	.runtime_idle = msm_dmov_runtime_idle,
	.suspend = msm_dmov_suspend_late,
};

static int msm_dmov_init_clocks(struct platform_device *pdev)
{
	int adm = (pdev->id >= 0) ? pdev->id : 0;
	int ret;

	dmov_conf[adm].clk = clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(dmov_conf[adm].clk)) {
		printk(KERN_ERR "%s: Error getting adm_clk\n", __func__);
		dmov_conf[adm].clk = NULL;
		return -ENOENT;
	}

	dmov_conf[adm].pclk = clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(dmov_conf[adm].pclk)) {
		dmov_conf[adm].pclk = NULL;
		/* pclk not present on all SoCs, don't bail on failure */
	}

	dmov_conf[adm].ebiclk = clk_get(&pdev->dev, "mem_clk");
	if (IS_ERR(dmov_conf[adm].ebiclk)) {
		dmov_conf[adm].ebiclk = NULL;
		/* ebiclk not present on all SoCs, don't bail on failure */
	} else {
		ret = clk_set_rate(dmov_conf[adm].ebiclk, 27000000);
		if (ret)
			return -ENOENT;
	}

	return 0;
}

static void config_datamover(int adm)
{
#ifdef CONFIG_MSM_ADM3
	int i;
	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
		struct msm_dmov_chan_conf *chan_conf =
			dmov_conf[adm].chan_conf;
		unsigned conf;
		/* Only configure scorpion channels */
		if (chan_conf[i].sd <= 1) {
			conf = readl_relaxed(DMOV_REG(DMOV_CONF(i), adm));
			conf &= ~DMOV_CONF_SD(7);
			conf |= DMOV_CONF_SD(chan_conf[i].sd);
			writel_relaxed(conf | DMOV_CONF_SHADOW_EN,
				       DMOV_REG(DMOV_CONF(i), adm));
		}
	}
	for (i = 0; i < MSM_DMOV_CRCI_COUNT; i++) {
		struct msm_dmov_crci_conf *crci_conf =
			dmov_conf[adm].crci_conf;

		writel_relaxed(DMOV_CRCI_CTL_BLK_SZ(crci_conf[i].blk_size),
			       DMOV_REG(DMOV_CRCI_CTL(i), adm));
	}
#endif
}

static int msm_dmov_probe(struct platform_device *pdev)
{
	int adm = (pdev->id >= 0) ? pdev->id : 0;
	int i;
	int ret;
	struct msm_dmov_pdata *pdata = pdev->dev.platform_data;
	struct resource *irqres =
		platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	struct resource *mres =
		platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (pdata) {
		dmov_conf[adm].sd = pdata->sd;
		dmov_conf[adm].sd_size = pdata->sd_size;
	}
	if (!dmov_conf[adm].sd_size)
		return -ENXIO;

	if (!irqres || !irqres->start)
		return -ENXIO;
	dmov_conf[adm].irq = irqres->start;

	if (!mres || !mres->start)
		return -ENXIO;
	dmov_conf[adm].base = ioremap_nocache(mres->start, resource_size(mres));
	if (!dmov_conf[adm].base)
		return -ENOMEM;

	dmov_conf[adm].cmd_wq = alloc_ordered_workqueue("dmov%d_wq", 0, adm);
	if (!dmov_conf[adm].cmd_wq) {
		PRINT_ERROR("Couldn't allocate ADM%d workqueue.\n", adm);
		ret = -ENOMEM;
		goto out_map;
	}

	ret = request_threaded_irq(dmov_conf[adm].irq, NULL, msm_dmov_isr,
				   IRQF_ONESHOT, "msmdatamover", NULL);
	if (ret) {
		PRINT_ERROR("Requesting ADM%d irq %d failed\n", adm,
			    dmov_conf[adm].irq);
		goto out_wq;
	}
	disable_irq(dmov_conf[adm].irq);
	ret = msm_dmov_init_clocks(pdev);
	if (ret) {
		PRINT_ERROR("Requesting ADM%d clocks failed\n", adm);
		goto out_irq;
	}
	ret = msm_dmov_clk_toggle(adm, 1);
	if (ret) {
		PRINT_ERROR("Enabling ADM%d clocks failed\n", adm);
		goto out_irq;
	}

	config_datamover(adm);
	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
		INIT_LIST_HEAD(&dmov_conf[adm].staged_commands[i]);
		INIT_LIST_HEAD(&dmov_conf[adm].ready_commands[i]);
		INIT_LIST_HEAD(&dmov_conf[adm].active_commands[i]);

		writel_relaxed(DMOV_RSLT_CONF_IRQ_EN
			       | DMOV_RSLT_CONF_FORCE_FLUSH_RSLT,
			       DMOV_REG(DMOV_RSLT_CONF(i), adm));
	}
	wmb();
	msm_dmov_clk_toggle(adm, 0);
	return ret;
out_irq:
	free_irq(dmov_conf[adm].irq, NULL);
out_wq:
	destroy_workqueue(dmov_conf[adm].cmd_wq);
out_map:
	iounmap(dmov_conf[adm].base);
	return ret;
}

static struct platform_driver msm_dmov_driver = {
	.probe = msm_dmov_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_dmov_dev_pm_ops,
	},
};

static int __init msm_init_datamover(void)
{
	int ret;
	ret = platform_driver_register(&msm_dmov_driver);
	if (ret)
		return ret;
	return 0;
}
arch_initcall(msm_init_datamover);