/* linux/arch/arm/mach-msm/dma.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2008-2010, 2012 Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <mach/dma.h>

#define MODULE_NAME "msm_dmov"

#define MSM_DMOV_CHANNEL_COUNT 16
#define MSM_DMOV_CRCI_COUNT 16

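/*
 * Clock gating state for each ADM: clocks fully off (CLK_DIS), still on but
 * scheduled to be turned off by the clock timer (CLK_TO_BE_DIS), or on with
 * work outstanding (CLK_EN).
 */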
enum {
	CLK_DIS,
	CLK_TO_BE_DIS,
	CLK_EN
};

struct msm_dmov_ci_conf {
	int start;
	int end;
	int burst;
};

struct msm_dmov_crci_conf {
	int sd;
	int blk_size;
};

struct msm_dmov_chan_conf {
	int sd;
	int block;
	int priority;
};

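/*
 * Per-ADM (application data mover) state: register base, per-channel and
 * per-CRCI configuration, ready/active command queues for each of the 16
 * channels, the clocks that must be on while commands run, and the timer
 * that lazily gates those clocks once the ADM goes idle.
 */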
struct msm_dmov_conf {
	void *base;
	struct msm_dmov_crci_conf *crci_conf;
	struct msm_dmov_chan_conf *chan_conf;
	int channel_active;
	int sd;
	size_t sd_size;
	struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];
	struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];
	spinlock_t lock;
	unsigned int irq;
	struct clk *clk;
	struct clk *pclk;
	struct clk *ebiclk;
	unsigned int clk_ctl;
	struct timer_list timer;
};

static void msm_dmov_clock_timer(unsigned long);
static int msm_dmov_clk_toggle(int, int);

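/*
 * Per-target channel and CRCI configuration tables.  MSM8x60 has two ADMs;
 * channels using DMOV_CHANNEL_MODEM_CONF (sd = 3) belong to the modem
 * security domain and are not reconfigured by config_datamover().  Other
 * targets fall back to a single, unconfigured ADM.
 */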
#ifdef CONFIG_ARCH_MSM8X60

#define DMOV_CHANNEL_DEFAULT_CONF { .sd = 1, .block = 0, .priority = 0 }
#define DMOV_CHANNEL_MODEM_CONF { .sd = 3, .block = 0, .priority = 0 }
#define DMOV_CHANNEL_CONF(secd, blk, pri) \
	{ .sd = secd, .block = blk, .priority = pri }

static struct msm_dmov_chan_conf adm0_chan_conf[] = {
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
};

static struct msm_dmov_chan_conf adm1_chan_conf[] = {
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
};

#define DMOV_CRCI_DEFAULT_CONF { .sd = 1, .blk_size = 0 }
#define DMOV_CRCI_CONF(secd, blk) { .sd = secd, .blk_size = blk }

static struct msm_dmov_crci_conf adm0_crci_conf[] = {
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 4),
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
};

static struct msm_dmov_crci_conf adm1_crci_conf[] = {
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_DEFAULT_CONF,
};

static struct msm_dmov_conf dmov_conf[] = {
	{
		.crci_conf = adm0_crci_conf,
		.chan_conf = adm0_chan_conf,
		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
		.clk_ctl = CLK_DIS,
		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 0),
	}, {
		.crci_conf = adm1_crci_conf,
		.chan_conf = adm1_chan_conf,
		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
		.clk_ctl = CLK_DIS,
		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 1),
	}
};
#else
static struct msm_dmov_conf dmov_conf[] = {
	{
		.crci_conf = NULL,
		.chan_conf = NULL,
		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
		.clk_ctl = CLK_DIS,
		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 0),
	}
};
#endif

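/*
 * Clients address a channel with a single "id" that encodes both the ADM
 * instance and the channel number: id = adm * MSM_DMOV_CHANNEL_COUNT + ch.
 * DMOV_REG() additionally offsets into the security-domain shadow region
 * selected at probe time through platform data (sd, sd_size).
 */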
#define MSM_DMOV_ID_COUNT (MSM_DMOV_CHANNEL_COUNT * ARRAY_SIZE(dmov_conf))
#define DMOV_REG(name, adm) ((name) + (dmov_conf[adm].base) +\
	(dmov_conf[adm].sd * dmov_conf[adm].sd_size))
#define DMOV_ID_TO_ADM(id) ((id) / MSM_DMOV_CHANNEL_COUNT)
#define DMOV_ID_TO_CHAN(id) ((id) % MSM_DMOV_CHANNEL_COUNT)
#define DMOV_CHAN_ADM_TO_ID(ch, adm) ((ch) + (adm) * MSM_DMOV_CHANNEL_COUNT)

#ifdef CONFIG_MSM_ADM3
#define DMOV_IRQ_TO_ADM(irq) \
({ \
	typeof(irq) _irq = irq; \
	((_irq == INT_ADM1_MASTER) || (_irq == INT_ADM1_AARM)); \
})
#else
#define DMOV_IRQ_TO_ADM(irq) 0
#endif

enum {
	MSM_DMOV_PRINT_ERRORS = 1,
	MSM_DMOV_PRINT_IO = 2,
	MSM_DMOV_PRINT_FLOW = 4
};

unsigned int msm_dmov_print_mask = MSM_DMOV_PRINT_ERRORS;

#define MSM_DMOV_DPRINTF(mask, format, args...) \
	do { \
		if ((mask) & msm_dmov_print_mask) \
			printk(KERN_ERR format, args); \
	} while (0)
#define PRINT_ERROR(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_ERRORS, format, args);
#define PRINT_IO(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_IO, format, args);
#define PRINT_FLOW(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_FLOW, format, args);

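/*
 * Enable (on = 1) or disable (on = 0) the clocks an ADM needs to run.
 * The core clock is mandatory; the interface (pclk) and EBI clocks are
 * optional and only touched when present.  On a partial enable failure,
 * everything already enabled is rolled back.
 */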
static int msm_dmov_clk_toggle(int adm, int on)
{
	int ret = 0;

	if (on) {
		ret = clk_enable(dmov_conf[adm].clk);
		if (ret)
			goto err;
		if (dmov_conf[adm].pclk) {
			ret = clk_enable(dmov_conf[adm].pclk);
			if (ret) {
				clk_disable(dmov_conf[adm].clk);
				goto err;
			}
		}
		if (dmov_conf[adm].ebiclk) {
			ret = clk_enable(dmov_conf[adm].ebiclk);
			if (ret) {
				if (dmov_conf[adm].pclk)
					clk_disable(dmov_conf[adm].pclk);
				clk_disable(dmov_conf[adm].clk);
			}
		}
	} else {
		clk_disable(dmov_conf[adm].clk);
		if (dmov_conf[adm].pclk)
			clk_disable(dmov_conf[adm].pclk);
		if (dmov_conf[adm].ebiclk)
			clk_disable(dmov_conf[adm].ebiclk);
	}
err:
	return ret;
}

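/*
 * Deferred clock gating: runs one second after the last channel on this ADM
 * went idle and, if nothing new was queued in the meantime (clk_ctl is still
 * CLK_TO_BE_DIS), turns the clocks off.
 */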
static void msm_dmov_clock_timer(unsigned long adm)
{
	unsigned long irq_flags;
	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS) {
		BUG_ON(dmov_conf[adm].channel_active);
		msm_dmov_clk_toggle(adm, 0);
		dmov_conf[adm].clk_ctl = CLK_DIS;
	}
	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
}

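/*
 * Ask the hardware to stop whatever is running on the channel by writing its
 * flush register; bit 31 selects a graceful stop.  The cmd argument is not
 * used here.
 */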
void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful)
{
	int adm = DMOV_ID_TO_ADM(id);
	int ch = DMOV_ID_TO_CHAN(id);
	writel_relaxed((graceful << 31), DMOV_REG(DMOV_FLUSH0(ch), adm));
	wmb();
}
EXPORT_SYMBOL(msm_dmov_stop_cmd);

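/*
 * Queue a command on channel "id".  The ADM clocks are switched on (or a
 * pending shutdown is cancelled) first.  If the channel's command pointer
 * register is ready, the command is started immediately and moved to the
 * active list; otherwise it is parked on the ready list and started later
 * from the interrupt handler.  cmd->exec_func, when set, is invoked just
 * before the command pointer is written.
 */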
void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
{
	unsigned long irq_flags;
	unsigned int status;
	int adm = DMOV_ID_TO_ADM(id);
	int ch = DMOV_ID_TO_CHAN(id);

	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	if (dmov_conf[adm].clk_ctl == CLK_DIS) {
		status = msm_dmov_clk_toggle(adm, 1);
		if (status != 0)
			goto error;
	} else if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS)
		del_timer(&dmov_conf[adm].timer);
	dmov_conf[adm].clk_ctl = CLK_EN;

	status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
	if (status & DMOV_STATUS_CMD_PTR_RDY) {
		PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n",
			id, status);
		if (cmd->exec_func)
			cmd->exec_func(cmd);
		list_add_tail(&cmd->list, &dmov_conf[adm].active_commands[ch]);
		if (!dmov_conf[adm].channel_active)
			enable_irq(dmov_conf[adm].irq);
		dmov_conf[adm].channel_active |= 1U << ch;
		PRINT_IO("Writing %x exactly to register\n", cmd->cmdptr);
		writel_relaxed(cmd->cmdptr, DMOV_REG(DMOV_CMD_PTR(ch), adm));
	} else {
		if (!dmov_conf[adm].channel_active) {
			dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
			mod_timer(&dmov_conf[adm].timer, jiffies + HZ);
		}
		if (list_empty(&dmov_conf[adm].active_commands[ch]))
			PRINT_ERROR("msm_dmov_enqueue_cmd_ext(%d), stalled, "
				"status %x\n", id, status);
		PRINT_IO("msm_dmov_enqueue_cmd(%d), enqueue command, status "
			"%x\n", id, status);
		list_add_tail(&cmd->list, &dmov_conf[adm].ready_commands[ch]);
	}
error:
	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
}
EXPORT_SYMBOL(msm_dmov_enqueue_cmd_ext);

void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
{
	/* Disable callback function (for backwards compatibility) */
	cmd->exec_func = NULL;

	msm_dmov_enqueue_cmd_ext(id, cmd);
}
EXPORT_SYMBOL(msm_dmov_enqueue_cmd);

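/*
 * Flush the command currently active on a channel.  When "graceful" is set,
 * DMOV_FLUSH_TYPE is written to request a graceful flush; otherwise a plain
 * flush is issued.  Completion is still reported through the interrupt
 * handler as DMOV_RSLT_FLUSH.
 */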
void msm_dmov_flush(unsigned int id, int graceful)
{
	unsigned long irq_flags;
	int ch = DMOV_ID_TO_CHAN(id);
	int adm = DMOV_ID_TO_ADM(id);
	int flush = graceful ? DMOV_FLUSH_TYPE : 0;
	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	/* XXX not checking if flush cmd sent already */
	if (!list_empty(&dmov_conf[adm].active_commands[ch])) {
		PRINT_IO("msm_dmov_flush(%d), send flush cmd\n", id);
		writel_relaxed(flush, DMOV_REG(DMOV_FLUSH0(ch), adm));
	}
	/* spin_unlock_irqrestore has the necessary barrier */
	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
}
EXPORT_SYMBOL(msm_dmov_flush);

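/*
 * Synchronous helper built on msm_dmov_enqueue_cmd(): it queues the given
 * cmdptr word with a completion-based callback and blocks until the ADM
 * reports a result.  0x80000002 is the only result treated as success;
 * anything else dumps the captured flush registers and returns -EIO.
 *
 * Minimal usage sketch (hypothetical client; "id" and "cmdptr" are assumed
 * to have been set up elsewhere, e.g. a command list in DMA-coherent
 * memory):
 *
 *	if (msm_dmov_exec_cmd(id, cmdptr))
 *		pr_err("ADM transfer failed\n");
 */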
struct msm_dmov_exec_cmdptr_cmd {
	struct msm_dmov_cmd dmov_cmd;
	struct completion complete;
	unsigned id;
	unsigned int result;
	struct msm_dmov_errdata err;
};

static void
dmov_exec_cmdptr_complete_func(struct msm_dmov_cmd *_cmd,
			       unsigned int result,
			       struct msm_dmov_errdata *err)
{
	struct msm_dmov_exec_cmdptr_cmd *cmd = container_of(_cmd, struct msm_dmov_exec_cmdptr_cmd, dmov_cmd);
	cmd->result = result;
	if (result != 0x80000002 && err)
		memcpy(&cmd->err, err, sizeof(struct msm_dmov_errdata));

	complete(&cmd->complete);
}

int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
{
	struct msm_dmov_exec_cmdptr_cmd cmd;

	PRINT_FLOW("dmov_exec_cmdptr(%d, %x)\n", id, cmdptr);

	cmd.dmov_cmd.cmdptr = cmdptr;
	cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
	cmd.dmov_cmd.exec_func = NULL;
	cmd.id = id;
	cmd.result = 0;
	init_completion(&cmd.complete);

	msm_dmov_enqueue_cmd(id, &cmd.dmov_cmd);
	wait_for_completion_io(&cmd.complete);

	if (cmd.result != 0x80000002) {
		PRINT_ERROR("dmov_exec_cmdptr(%d): ERROR, result: %x\n", id, cmd.result);
		PRINT_ERROR("dmov_exec_cmdptr(%d):  flush: %x %x %x %x\n",
			id, cmd.err.flush[0], cmd.err.flush[1], cmd.err.flush[2], cmd.err.flush[3]);
		return -EIO;
	}
	PRINT_FLOW("dmov_exec_cmdptr(%d, %x) done\n", id, cmdptr);
	return 0;
}
EXPORT_SYMBOL(msm_dmov_exec_cmd);

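/*
 * Capture the channel's flush registers for error reporting.  FLUSH2 is not
 * read here; flush[2] is left as 0.
 */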
static void fill_errdata(struct msm_dmov_errdata *errdata, int ch, int adm)
{
	errdata->flush[0] = readl_relaxed(DMOV_REG(DMOV_FLUSH0(ch), adm));
	errdata->flush[1] = readl_relaxed(DMOV_REG(DMOV_FLUSH1(ch), adm));
	errdata->flush[2] = 0;
	errdata->flush[3] = readl_relaxed(DMOV_REG(DMOV_FLUSH3(ch), adm));
	errdata->flush[4] = readl_relaxed(DMOV_REG(DMOV_FLUSH4(ch), adm));
	errdata->flush[5] = readl_relaxed(DMOV_REG(DMOV_FLUSH5(ch), adm));
}

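/*
 * Interrupt handler shared by both ADM interrupts.  For every channel
 * flagged in the ISR it drains all valid results, completing the active
 * command (done, flush or error), then starts the next ready command if the
 * command pointer register is free.  When the whole ADM goes idle, the IRQ
 * is masked and a one-second timer is armed to gate the clocks.
 */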
static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
{
	unsigned int int_status;
	unsigned int mask;
	unsigned int id;
	unsigned int ch;
	unsigned long irq_flags;
	unsigned int ch_status;
	unsigned int ch_result;
	unsigned int valid = 0;
	struct msm_dmov_cmd *cmd;
	int adm = DMOV_IRQ_TO_ADM(irq);

	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	/* read and clear isr */
	int_status = readl_relaxed(DMOV_REG(DMOV_ISR, adm));
	PRINT_FLOW("msm_datamover_irq_handler: DMOV_ISR %x\n", int_status);

	while (int_status) {
		mask = int_status & -int_status;
		ch = fls(mask) - 1;
		id = DMOV_CHAN_ADM_TO_ID(ch, adm);
		PRINT_FLOW("msm_datamover_irq_handler %08x %08x id %d\n", int_status, mask, id);
		int_status &= ~mask;
		ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
		if (!(ch_status & DMOV_STATUS_RSLT_VALID)) {
			PRINT_FLOW("msm_datamover_irq_handler id %d, "
				"result not valid %x\n", id, ch_status);
			continue;
		}
		do {
			valid = 1;
			ch_result = readl_relaxed(DMOV_REG(DMOV_RSLT(ch), adm));
			if (list_empty(&dmov_conf[adm].active_commands[ch])) {
				PRINT_ERROR("msm_datamover_irq_handler id %d, got result "
					"with no active command, status %x, result %x\n",
					id, ch_status, ch_result);
				cmd = NULL;
			} else {
				cmd = list_entry(dmov_conf[adm].
					active_commands[ch].next, typeof(*cmd),
					list);
			}
			PRINT_FLOW("msm_datamover_irq_handler id %d, status %x, result %x\n", id, ch_status, ch_result);
			if (ch_result & DMOV_RSLT_DONE) {
				PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n",
					id, ch_status);
				PRINT_IO("msm_datamover_irq_handler id %d, got result "
					"for %p, result %x\n", id, cmd, ch_result);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, NULL);
				}
			}
			if (ch_result & DMOV_RSLT_FLUSH) {
				struct msm_dmov_errdata errdata;

				fill_errdata(&errdata, ch, adm);
				PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
				PRINT_FLOW("msm_datamover_irq_handler id %d, flush, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, &errdata);
				}
			}
			if (ch_result & DMOV_RSLT_ERROR) {
				struct msm_dmov_errdata errdata;

				fill_errdata(&errdata, ch, adm);

				PRINT_ERROR("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
				PRINT_ERROR("msm_datamover_irq_handler id %d, error, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, &errdata);
				}
				/* this does not seem to work, once we get an error */
				/* the datamover will no longer accept commands */
				writel_relaxed(0, DMOV_REG(DMOV_FLUSH0(ch),
					       adm));
			}
			rmb();
			ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch),
						  adm));
			PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
			if ((ch_status & DMOV_STATUS_CMD_PTR_RDY) &&
			    !list_empty(&dmov_conf[adm].ready_commands[ch])) {
				cmd = list_entry(dmov_conf[adm].
					ready_commands[ch].next, typeof(*cmd),
					list);
				list_del(&cmd->list);
				if (cmd->exec_func)
					cmd->exec_func(cmd);
				list_add_tail(&cmd->list,
					&dmov_conf[adm].active_commands[ch]);
				PRINT_FLOW("msm_datamover_irq_handler id %d, start command\n", id);
				writel_relaxed(cmd->cmdptr,
					       DMOV_REG(DMOV_CMD_PTR(ch), adm));
			}
		} while (ch_status & DMOV_STATUS_RSLT_VALID);
		if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
				list_empty(&dmov_conf[adm].ready_commands[ch]))
			dmov_conf[adm].channel_active &= ~(1U << ch);
		PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
	}

	if (!dmov_conf[adm].channel_active && valid) {
		disable_irq_nosync(dmov_conf[adm].irq);
		dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
		mod_timer(&dmov_conf[adm].timer, jiffies + HZ);
	}

	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
	return valid ? IRQ_HANDLED : IRQ_NONE;
}

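/*
 * Late suspend hook: if a deferred clock shutdown is pending, do it now
 * instead of waiting for the timer, so the ADM clocks are off before the
 * system suspends.
 */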
static int msm_dmov_suspend_late(struct device *dev)
{
	unsigned long irq_flags;
	struct platform_device *pdev = to_platform_device(dev);
	int adm = (pdev->id >= 0) ? pdev->id : 0;
	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS) {
		BUG_ON(dmov_conf[adm].channel_active);
		del_timer(&dmov_conf[adm].timer);
		msm_dmov_clk_toggle(adm, 0);
		dmov_conf[adm].clk_ctl = CLK_DIS;
	}
	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
	return 0;
}

static int msm_dmov_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}

static int msm_dmov_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}

static int msm_dmov_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: idling...\n");
	return 0;
}

static struct dev_pm_ops msm_dmov_dev_pm_ops = {
	.runtime_suspend = msm_dmov_runtime_suspend,
	.runtime_resume = msm_dmov_runtime_resume,
	.runtime_idle = msm_dmov_runtime_idle,
	.suspend = msm_dmov_suspend_late,
};

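/*
 * Look up the ADM clocks.  "core_clk" is required; "iface_clk" and "mem_clk"
 * are optional and simply left NULL when the SoC does not provide them.  The
 * EBI/memory clock, when present, is fixed at 27 MHz.
 */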
static int msm_dmov_init_clocks(struct platform_device *pdev)
{
	int adm = (pdev->id >= 0) ? pdev->id : 0;
	int ret;

	dmov_conf[adm].clk = clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(dmov_conf[adm].clk)) {
		printk(KERN_ERR "%s: Error getting adm_clk\n", __func__);
		dmov_conf[adm].clk = NULL;
		return -ENOENT;
	}

	dmov_conf[adm].pclk = clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(dmov_conf[adm].pclk)) {
		dmov_conf[adm].pclk = NULL;
		/* pclk not present on all SoCs, don't bail on failure */
	}

	dmov_conf[adm].ebiclk = clk_get(&pdev->dev, "mem_clk");
	if (IS_ERR(dmov_conf[adm].ebiclk)) {
		dmov_conf[adm].ebiclk = NULL;
		/* ebiclk not present on all SoCs, don't bail on failure */
	} else {
		ret = clk_set_rate(dmov_conf[adm].ebiclk, 27000000);
		if (ret)
			return -ENOENT;
	}

	return 0;
}

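/*
 * Apply the per-channel security-domain and per-CRCI block-size settings on
 * ADM3-based targets.  Only channels assigned to the Scorpion/apps processor
 * (sd <= 1) are reconfigured; the rest are left untouched.
 */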
static void config_datamover(int adm)
{
#ifdef CONFIG_MSM_ADM3
	int i;
	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
		struct msm_dmov_chan_conf *chan_conf =
			dmov_conf[adm].chan_conf;
		unsigned conf;
		/* Only configure scorpion channels */
		if (chan_conf[i].sd <= 1) {
			conf = readl_relaxed(DMOV_REG(DMOV_CONF(i), adm));
			conf &= ~DMOV_CONF_SD(7);
			conf |= DMOV_CONF_SD(chan_conf[i].sd);
			writel_relaxed(conf | DMOV_CONF_SHADOW_EN,
				       DMOV_REG(DMOV_CONF(i), adm));
		}
	}
	for (i = 0; i < MSM_DMOV_CRCI_COUNT; i++) {
		struct msm_dmov_crci_conf *crci_conf =
			dmov_conf[adm].crci_conf;

		writel_relaxed(DMOV_CRCI_CTL_BLK_SZ(crci_conf[i].blk_size),
			       DMOV_REG(DMOV_CRCI_CTL(i), adm));
	}
#endif
}

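/*
 * Probe one ADM instance: pick up the security-domain layout from platform
 * data, map the registers, request (and initially mask) the IRQ, set up the
 * clocks, program the channel/CRCI configuration and result registers, then
 * leave the clocks gated until a command is enqueued.
 */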
static int msm_dmov_probe(struct platform_device *pdev)
{
	int adm = (pdev->id >= 0) ? pdev->id : 0;
	int i;
	int ret;
	struct msm_dmov_pdata *pdata = pdev->dev.platform_data;
	struct resource *irqres =
		platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	struct resource *mres =
		platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (pdata) {
		dmov_conf[adm].sd = pdata->sd;
		dmov_conf[adm].sd_size = pdata->sd_size;
	}
	if (!dmov_conf[adm].sd_size)
		return -ENXIO;

	if (!irqres || !irqres->start)
		return -ENXIO;
	dmov_conf[adm].irq = irqres->start;

	if (!mres || !mres->start)
		return -ENXIO;
	dmov_conf[adm].base = ioremap_nocache(mres->start, resource_size(mres));
	if (!dmov_conf[adm].base)
		return -ENOMEM;

	ret = request_irq(dmov_conf[adm].irq, msm_datamover_irq_handler,
		0, "msmdatamover", NULL);
	if (ret) {
		PRINT_ERROR("Requesting ADM%d irq %d failed\n", adm,
			dmov_conf[adm].irq);
		goto out_map;
	}
	disable_irq(dmov_conf[adm].irq);
	ret = msm_dmov_init_clocks(pdev);
	if (ret) {
		PRINT_ERROR("Requesting ADM%d clocks failed\n", adm);
		goto out_irq;
	}
	ret = msm_dmov_clk_toggle(adm, 1);
	if (ret) {
		PRINT_ERROR("Enabling ADM%d clocks failed\n", adm);
		goto out_irq;
	}

	config_datamover(adm);
	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
		INIT_LIST_HEAD(&dmov_conf[adm].ready_commands[i]);
		INIT_LIST_HEAD(&dmov_conf[adm].active_commands[i]);

		writel_relaxed(DMOV_RSLT_CONF_IRQ_EN
			| DMOV_RSLT_CONF_FORCE_FLUSH_RSLT,
			DMOV_REG(DMOV_RSLT_CONF(i), adm));
	}
	wmb();
	msm_dmov_clk_toggle(adm, 0);
	return ret;
out_irq:
	free_irq(dmov_conf[adm].irq, NULL);
out_map:
	iounmap(dmov_conf[adm].base);
	return ret;
}

static struct platform_driver msm_dmov_driver = {
	.probe = msm_dmov_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_dmov_dev_pm_ops,
	},
};

/* static int __init */
static int __init msm_init_datamover(void)
{
	int ret;
	ret = platform_driver_register(&msm_dmov_driver);
	if (ret)
		return ret;
	return 0;
}
arch_initcall(msm_init_datamover);