/* linux/arch/arm/mach-msm/dma.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <mach/dma.h>

#define MODULE_NAME "msm_dmov"

#define MSM_DMOV_CHANNEL_COUNT 16
#define MSM_DMOV_CRCI_COUNT 16

enum {
	CLK_DIS,
	CLK_TO_BE_DIS,
	CLK_EN
};

struct msm_dmov_ci_conf {
	int start;
	int end;
	int burst;
};

struct msm_dmov_crci_conf {
	int sd;
	int blk_size;
};

struct msm_dmov_chan_conf {
	int sd;
	int block;
	int priority;
};

struct msm_dmov_conf {
	void *base;
	struct msm_dmov_crci_conf *crci_conf;
	struct msm_dmov_chan_conf *chan_conf;
	int channel_active;
	struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];
	struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];
	spinlock_t lock;
	unsigned int irq;
	struct clk *clk;
	struct clk *pclk;
	struct clk *ebiclk;
	unsigned int clk_ctl;
	struct timer_list timer;
};

static void msm_dmov_clock_timer(unsigned long);
static int msm_dmov_clk_toggle(int, int);

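/*
 * Static per-ADM configuration.  MSM8x60 has two ADM instances, each with its
 * own channel and CRCI tables; other targets fall back to a single ADM with
 * no static configuration.
 */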
#ifdef CONFIG_ARCH_MSM8X60

#define DMOV_CHANNEL_DEFAULT_CONF { .sd = 1, .block = 0, .priority = 0 }
#define DMOV_CHANNEL_MODEM_CONF { .sd = 3, .block = 0, .priority = 0 }
#define DMOV_CHANNEL_CONF(secd, blk, pri) \
	{ .sd = secd, .block = blk, .priority = pri }

static struct msm_dmov_chan_conf adm0_chan_conf[] = {
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
};

static struct msm_dmov_chan_conf adm1_chan_conf[] = {
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
};

#define DMOV_CRCI_DEFAULT_CONF { .sd = 1, .blk_size = 0 }
#define DMOV_CRCI_CONF(secd, blk) { .sd = secd, .blk_size = blk }

static struct msm_dmov_crci_conf adm0_crci_conf[] = {
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 4),
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
};

static struct msm_dmov_crci_conf adm1_crci_conf[] = {
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_DEFAULT_CONF,
};

static struct msm_dmov_conf dmov_conf[] = {
	{
		.crci_conf = adm0_crci_conf,
		.chan_conf = adm0_chan_conf,
		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
		.clk_ctl = CLK_DIS,
		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 0),
	}, {
		.crci_conf = adm1_crci_conf,
		.chan_conf = adm1_chan_conf,
		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
		.clk_ctl = CLK_DIS,
		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 1),
	}
};
#else
static struct msm_dmov_conf dmov_conf[] = {
	{
		.crci_conf = NULL,
		.chan_conf = NULL,
		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
		.clk_ctl = CLK_DIS,
		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 0),
	}
};
#endif

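/*
 * Commands are addressed by a flat id: id = adm * MSM_DMOV_CHANNEL_COUNT + ch.
 * These helpers translate between the id, the channel number, and the ADM
 * instance, and form register addresses relative to that ADM's base.
 */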
#define MSM_DMOV_ID_COUNT (MSM_DMOV_CHANNEL_COUNT * ARRAY_SIZE(dmov_conf))
#define DMOV_REG(name, adm) ((name) + (dmov_conf[adm].base))
#define DMOV_ID_TO_ADM(id) ((id) / MSM_DMOV_CHANNEL_COUNT)
#define DMOV_ID_TO_CHAN(id) ((id) % MSM_DMOV_CHANNEL_COUNT)
#define DMOV_CHAN_ADM_TO_ID(ch, adm) ((ch) + (adm) * MSM_DMOV_CHANNEL_COUNT)

#ifdef CONFIG_MSM_ADM3
#define DMOV_IRQ_TO_ADM(irq) \
({ \
	typeof(irq) _irq = irq; \
	((_irq == INT_ADM1_MASTER) || (_irq == INT_ADM1_AARM)); \
})
#else
#define DMOV_IRQ_TO_ADM(irq) 0
#endif

enum {
	MSM_DMOV_PRINT_ERRORS = 1,
	MSM_DMOV_PRINT_IO = 2,
	MSM_DMOV_PRINT_FLOW = 4
};

unsigned int msm_dmov_print_mask = MSM_DMOV_PRINT_ERRORS;

#define MSM_DMOV_DPRINTF(mask, format, args...) \
	do { \
		if ((mask) & msm_dmov_print_mask) \
			printk(KERN_ERR format, args); \
	} while (0)
#define PRINT_ERROR(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_ERRORS, format, args);
#define PRINT_IO(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_IO, format, args);
#define PRINT_FLOW(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_FLOW, format, args);

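/*
 * Enable or disable the ADM clocks as a group.  The core clock is required;
 * the interface (pclk) and EBI clocks are only touched when present.  On the
 * enable path, a failure rolls back whatever was already switched on.
 */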
static int msm_dmov_clk_toggle(int adm, int on)
{
	int ret = 0;

	if (on) {
		ret = clk_enable(dmov_conf[adm].clk);
		if (ret)
			goto err;
		if (dmov_conf[adm].pclk) {
			ret = clk_enable(dmov_conf[adm].pclk);
			if (ret) {
				clk_disable(dmov_conf[adm].clk);
				goto err;
			}
		}
		if (dmov_conf[adm].ebiclk) {
			ret = clk_enable(dmov_conf[adm].ebiclk);
			if (ret) {
				if (dmov_conf[adm].pclk)
					clk_disable(dmov_conf[adm].pclk);
				clk_disable(dmov_conf[adm].clk);
			}
		}
	} else {
		clk_disable(dmov_conf[adm].clk);
		if (dmov_conf[adm].pclk)
			clk_disable(dmov_conf[adm].pclk);
		if (dmov_conf[adm].ebiclk)
			clk_disable(dmov_conf[adm].ebiclk);
	}
err:
	return ret;
}

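/*
 * Deferred clock shutdown: if the one-second timer expires and no new command
 * has re-armed the ADM in the meantime, drop the clocks.
 */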
static void msm_dmov_clock_timer(unsigned long adm)
{
	unsigned long irq_flags;
	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS) {
		BUG_ON(dmov_conf[adm].channel_active);
		msm_dmov_clk_toggle(adm, 0);
		dmov_conf[adm].clk_ctl = CLK_DIS;
	}
	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
}

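/*
 * Stop the command running on channel id; bit 31 of the FLUSH0 register
 * carries the "graceful" flag.
 */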
void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful)
{
	int adm = DMOV_ID_TO_ADM(id);
	int ch = DMOV_ID_TO_CHAN(id);
	writel_relaxed((graceful << 31), DMOV_REG(DMOV_FLUSH0(ch), adm));
	wmb();
}
EXPORT_SYMBOL(msm_dmov_stop_cmd);

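/*
 * Queue a command on the channel encoded in id.  The ADM clocks are turned on
 * (or kept on) first.  If the channel's command pointer register is ready the
 * command is issued immediately and moved to active_commands[]; otherwise it
 * is parked on ready_commands[] and issued from the IRQ handler once the
 * channel frees up.
 */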
void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
{
	unsigned long irq_flags;
	unsigned int status;
	int adm = DMOV_ID_TO_ADM(id);
	int ch = DMOV_ID_TO_CHAN(id);

	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	if (dmov_conf[adm].clk_ctl == CLK_DIS)
		msm_dmov_clk_toggle(adm, 1);
	else if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS)
		del_timer(&dmov_conf[adm].timer);
	dmov_conf[adm].clk_ctl = CLK_EN;

	status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
	if (status & DMOV_STATUS_CMD_PTR_RDY) {
		PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n",
			id, status);
		if (cmd->exec_func)
			cmd->exec_func(cmd);
		list_add_tail(&cmd->list, &dmov_conf[adm].active_commands[ch]);
		if (!dmov_conf[adm].channel_active)
			enable_irq(dmov_conf[adm].irq);
		dmov_conf[adm].channel_active |= 1U << ch;
		PRINT_IO("Writing %x to CMD_PTR register\n", cmd->cmdptr);
		writel_relaxed(cmd->cmdptr, DMOV_REG(DMOV_CMD_PTR(ch), adm));
	} else {
		if (!dmov_conf[adm].channel_active) {
			dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
			mod_timer(&dmov_conf[adm].timer, jiffies + HZ);
		}
		if (list_empty(&dmov_conf[adm].active_commands[ch]))
			PRINT_ERROR("msm_dmov_enqueue_cmd_ext(%d), stalled, "
				"status %x\n", id, status);
		PRINT_IO("msm_dmov_enqueue_cmd(%d), enqueue command, status "
			"%x\n", id, status);
		list_add_tail(&cmd->list, &dmov_conf[adm].ready_commands[ch]);
	}
	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
}
EXPORT_SYMBOL(msm_dmov_enqueue_cmd_ext);

void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
{
	/* Disable callback function (for backwards compatibility) */
	cmd->exec_func = NULL;

	msm_dmov_enqueue_cmd_ext(id, cmd);
}
EXPORT_SYMBOL(msm_dmov_enqueue_cmd);

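/*
 * Issue a flush for whatever is active on channel id.  Completion (with
 * DMOV_RSLT_FLUSH set) is reported through the normal IRQ path.
 */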
void msm_dmov_flush(unsigned int id)
{
	unsigned long irq_flags;
	int ch = DMOV_ID_TO_CHAN(id);
	int adm = DMOV_ID_TO_ADM(id);
	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	/* XXX not checking if flush cmd sent already */
	if (!list_empty(&dmov_conf[adm].active_commands[ch])) {
		PRINT_IO("msm_dmov_flush(%d), send flush cmd\n", id);
		writel_relaxed(DMOV_FLUSH_TYPE, DMOV_REG(DMOV_FLUSH0(ch), adm));
	}
	/* spin_unlock_irqrestore has the necessary barrier */
	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
}
EXPORT_SYMBOL(msm_dmov_flush);

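/*
 * Synchronous command execution: wrap the caller's command pointer in a
 * temporary msm_dmov_cmd, enqueue it, and sleep until the completion callback
 * fires.  Any result other than the expected value 0x80000002 is treated as
 * an I/O error.
 */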
struct msm_dmov_exec_cmdptr_cmd {
	struct msm_dmov_cmd dmov_cmd;
	struct completion complete;
	unsigned id;
	unsigned int result;
	struct msm_dmov_errdata err;
};

static void
dmov_exec_cmdptr_complete_func(struct msm_dmov_cmd *_cmd,
			       unsigned int result,
			       struct msm_dmov_errdata *err)
{
	struct msm_dmov_exec_cmdptr_cmd *cmd = container_of(_cmd, struct msm_dmov_exec_cmdptr_cmd, dmov_cmd);
	cmd->result = result;
	if (result != 0x80000002 && err)
		memcpy(&cmd->err, err, sizeof(struct msm_dmov_errdata));

	complete(&cmd->complete);
}

int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
{
	struct msm_dmov_exec_cmdptr_cmd cmd;

	PRINT_FLOW("dmov_exec_cmdptr(%d, %x)\n", id, cmdptr);

	cmd.dmov_cmd.cmdptr = cmdptr;
	cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
	cmd.dmov_cmd.exec_func = NULL;
	cmd.id = id;
	cmd.result = 0;
	init_completion(&cmd.complete);

	msm_dmov_enqueue_cmd(id, &cmd.dmov_cmd);
	wait_for_completion_io(&cmd.complete);

	if (cmd.result != 0x80000002) {
		PRINT_ERROR("dmov_exec_cmdptr(%d): ERROR, result: %x\n", id, cmd.result);
		PRINT_ERROR("dmov_exec_cmdptr(%d): flush: %x %x %x %x\n",
			id, cmd.err.flush[0], cmd.err.flush[1], cmd.err.flush[2], cmd.err.flush[3]);
		return -EIO;
	}
	PRINT_FLOW("dmov_exec_cmdptr(%d, %x) done\n", id, cmdptr);
	return 0;
}
EXPORT_SYMBOL(msm_dmov_exec_cmd);

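/* Snapshot the channel's FLUSH0..FLUSH5 state registers for error reporting. */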
static void fill_errdata(struct msm_dmov_errdata *errdata, int ch, int adm)
{
	errdata->flush[0] = readl_relaxed(DMOV_REG(DMOV_FLUSH0(ch), adm));
	errdata->flush[1] = readl_relaxed(DMOV_REG(DMOV_FLUSH1(ch), adm));
	errdata->flush[2] = readl_relaxed(DMOV_REG(DMOV_FLUSH2(ch), adm));
	errdata->flush[3] = readl_relaxed(DMOV_REG(DMOV_FLUSH3(ch), adm));
	errdata->flush[4] = readl_relaxed(DMOV_REG(DMOV_FLUSH4(ch), adm));
	errdata->flush[5] = readl_relaxed(DMOV_REG(DMOV_FLUSH5(ch), adm));
}

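/*
 * ADM interrupt handler.  For every channel flagged in the ISR, drain the
 * result register: complete finished, flushed, or errored commands, start the
 * next queued command if the channel can take one, and once no channel has
 * work left, schedule the clocks to be dropped.
 */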
static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
{
	unsigned int int_status;
	unsigned int mask;
	unsigned int id;
	unsigned int ch;
	unsigned long irq_flags;
	unsigned int ch_status;
	unsigned int ch_result;
	unsigned int valid = 0;
	struct msm_dmov_cmd *cmd;
	int adm = DMOV_IRQ_TO_ADM(irq);

	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	/* read and clear isr */
	int_status = readl_relaxed(DMOV_REG(DMOV_ISR, adm));
	PRINT_FLOW("msm_datamover_irq_handler: DMOV_ISR %x\n", int_status);

	while (int_status) {
		mask = int_status & -int_status;
		ch = fls(mask) - 1;
		id = DMOV_CHAN_ADM_TO_ID(ch, adm);
		PRINT_FLOW("msm_datamover_irq_handler %08x %08x id %d\n", int_status, mask, id);
		int_status &= ~mask;
		ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
		if (!(ch_status & DMOV_STATUS_RSLT_VALID)) {
			PRINT_FLOW("msm_datamover_irq_handler id %d, "
				"result not valid %x\n", id, ch_status);
			continue;
		}
		do {
			valid = 1;
			ch_result = readl_relaxed(DMOV_REG(DMOV_RSLT(ch), adm));
			if (list_empty(&dmov_conf[adm].active_commands[ch])) {
				PRINT_ERROR("msm_datamover_irq_handler id %d, got result "
					"with no active command, status %x, result %x\n",
					id, ch_status, ch_result);
				cmd = NULL;
			} else {
				cmd = list_entry(dmov_conf[adm].
					active_commands[ch].next, typeof(*cmd),
					list);
			}
			PRINT_FLOW("msm_datamover_irq_handler id %d, status %x, result %x\n", id, ch_status, ch_result);
			if (ch_result & DMOV_RSLT_DONE) {
				PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n",
					id, ch_status);
				PRINT_IO("msm_datamover_irq_handler id %d, got result "
					"for %p, result %x\n", id, cmd, ch_result);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, NULL);
				}
			}
			if (ch_result & DMOV_RSLT_FLUSH) {
				struct msm_dmov_errdata errdata;

				fill_errdata(&errdata, ch, adm);
				PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
				PRINT_FLOW("msm_datamover_irq_handler id %d, flush, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, &errdata);
				}
			}
			if (ch_result & DMOV_RSLT_ERROR) {
				struct msm_dmov_errdata errdata;

				fill_errdata(&errdata, ch, adm);

				PRINT_ERROR("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
				PRINT_ERROR("msm_datamover_irq_handler id %d, error, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, &errdata);
				}
				/* this does not seem to work, once we get an error */
				/* the datamover will no longer accept commands */
				writel_relaxed(0, DMOV_REG(DMOV_FLUSH0(ch),
					adm));
			}
			rmb();
			ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch),
				adm));
			PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
			if ((ch_status & DMOV_STATUS_CMD_PTR_RDY) &&
			    !list_empty(&dmov_conf[adm].ready_commands[ch])) {
				cmd = list_entry(dmov_conf[adm].
					ready_commands[ch].next, typeof(*cmd),
					list);
				list_del(&cmd->list);
				if (cmd->exec_func)
					cmd->exec_func(cmd);
				list_add_tail(&cmd->list,
					&dmov_conf[adm].active_commands[ch]);
				PRINT_FLOW("msm_datamover_irq_handler id %d, start command\n", id);
				writel_relaxed(cmd->cmdptr,
					DMOV_REG(DMOV_CMD_PTR(ch), adm));
			}
		} while (ch_status & DMOV_STATUS_RSLT_VALID);
		if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
		    list_empty(&dmov_conf[adm].ready_commands[ch]))
			dmov_conf[adm].channel_active &= ~(1U << ch);
		PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
	}

	if (!dmov_conf[adm].channel_active && valid) {
		disable_irq_nosync(dmov_conf[adm].irq);
		dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
		mod_timer(&dmov_conf[adm].timer, jiffies + HZ);
	}

	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
	return valid ? IRQ_HANDLED : IRQ_NONE;
}

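/*
 * Late suspend hook: if a clock shutdown was pending, perform it now so the
 * ADM clocks are guaranteed off before the system suspends.
 */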
static int msm_dmov_suspend_late(struct device *dev)
{
	unsigned long irq_flags;
	struct platform_device *pdev = to_platform_device(dev);
	int adm = (pdev->id >= 0) ? pdev->id : 0;
	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS) {
		BUG_ON(dmov_conf[adm].channel_active);
		del_timer(&dmov_conf[adm].timer);
		msm_dmov_clk_toggle(adm, 0);
		dmov_conf[adm].clk_ctl = CLK_DIS;
	}
	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
	return 0;
}

static int msm_dmov_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}

static int msm_dmov_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}

static int msm_dmov_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: idling...\n");
	return 0;
}

static struct dev_pm_ops msm_dmov_dev_pm_ops = {
	.runtime_suspend = msm_dmov_runtime_suspend,
	.runtime_resume = msm_dmov_runtime_resume,
	.runtime_idle = msm_dmov_runtime_idle,
	.suspend = msm_dmov_suspend_late,
};

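/*
 * Look up the ADM clocks.  The core clock is mandatory; the interface and EBI
 * memory clocks are optional and simply skipped on SoCs that do not provide
 * them.
 */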
static int msm_dmov_init_clocks(struct platform_device *pdev)
{
	int adm = (pdev->id >= 0) ? pdev->id : 0;
	int ret;

	dmov_conf[adm].clk = clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(dmov_conf[adm].clk)) {
		printk(KERN_ERR "%s: Error getting adm_clk\n", __func__);
		dmov_conf[adm].clk = NULL;
		return -ENOENT;
	}

	dmov_conf[adm].pclk = clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(dmov_conf[adm].pclk)) {
		dmov_conf[adm].pclk = NULL;
		/* pclk not present on all SoCs, don't bail on failure */
	}

	dmov_conf[adm].ebiclk = clk_get(&pdev->dev, "mem_clk");
	if (IS_ERR(dmov_conf[adm].ebiclk)) {
		dmov_conf[adm].ebiclk = NULL;
		/* ebiclk not present on all SoCs, don't bail on failure */
	} else {
		ret = clk_set_rate(dmov_conf[adm].ebiclk, 27000000);
		if (ret)
			return -ENOENT;
	}

	return 0;
}

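/*
 * Program the static per-channel and per-CRCI configuration (ADM3 targets
 * only): the channel "sd" assignment with shadow mode enabled, and the CRCI
 * block sizes.
 */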
static void config_datamover(int adm)
{
#ifdef CONFIG_MSM_ADM3
	int i;
	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
		struct msm_dmov_chan_conf *chan_conf =
			dmov_conf[adm].chan_conf;
		unsigned conf;
		/* Only configure scorpion channels */
		if (chan_conf[i].sd <= 1) {
			conf = readl_relaxed(DMOV_REG(DMOV_CONF(i), adm));
			conf &= ~DMOV_CONF_SD(7);
			conf |= DMOV_CONF_SD(chan_conf[i].sd);
			writel_relaxed(conf | DMOV_CONF_SHADOW_EN,
				DMOV_REG(DMOV_CONF(i), adm));
		}
	}
	for (i = 0; i < MSM_DMOV_CRCI_COUNT; i++) {
		struct msm_dmov_crci_conf *crci_conf =
			dmov_conf[adm].crci_conf;

		writel_relaxed(DMOV_CRCI_CTL_BLK_SZ(crci_conf[i].blk_size),
			DMOV_REG(DMOV_CRCI_CTL(i), adm));
	}
#endif
}

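/*
 * Probe one ADM instance: recover the register base and interrupt number from
 * the IRQ resource, register the (initially disabled) interrupt handler, set
 * up the clocks, apply the static configuration, and initialise the
 * per-channel command lists and result-interrupt settings.
 */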
static int msm_dmov_probe(struct platform_device *pdev)
{
	int adm = (pdev->id >= 0) ? pdev->id : 0;
	int i;
	int ret;
	struct resource *res =
		platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	if (res) {
		dmov_conf[adm].irq = res->start;
		dmov_conf[adm].base = (void *)res->end;
	}
	if (!dmov_conf[adm].base || !dmov_conf[adm].irq)
		return -ENXIO;

	ret = request_irq(dmov_conf[adm].irq, msm_datamover_irq_handler,
		0, "msmdatamover", NULL);
	if (ret) {
		PRINT_ERROR("Requesting ADM%d irq %d failed\n", adm,
			dmov_conf[adm].irq);
		return ret;
	}
	disable_irq(dmov_conf[adm].irq);
	ret = msm_dmov_init_clocks(pdev);
	if (ret) {
		PRINT_ERROR("Requesting ADM%d clocks failed\n", adm);
		return -ENOENT;
	}
	ret = msm_dmov_clk_toggle(adm, 1);
	if (ret) {
		PRINT_ERROR("Enabling ADM%d clocks failed\n", adm);
		return -ENOENT;
	}

	config_datamover(adm);
	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
		INIT_LIST_HEAD(&dmov_conf[adm].ready_commands[i]);
		INIT_LIST_HEAD(&dmov_conf[adm].active_commands[i]);

		writel_relaxed(DMOV_RSLT_CONF_IRQ_EN
			| DMOV_RSLT_CONF_FORCE_FLUSH_RSLT,
			DMOV_REG(DMOV_RSLT_CONF(i), adm));
	}
	wmb();
	msm_dmov_clk_toggle(adm, 0);
	return ret;
}

static struct platform_driver msm_dmov_driver = {
	.probe = msm_dmov_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_dmov_dev_pm_ops,
	},
};

static int __init msm_init_datamover(void)
{
	int ret;
	ret = platform_driver_register(&msm_dmov_driver);
	if (ret)
		return ret;
	return 0;
}
arch_initcall(msm_init_datamover);