/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA transfer size is 16 MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/* A bitmask with enough bits for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

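/*
 * Channel register access helpers: sh_dc->base is a u32 pointer, so the
 * byte offsets of the registers are converted to word indices before the
 * pointer addition.
 */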
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

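/*
 * The CHCR transfer-size (TS) field may be split between a low and a high
 * bit range; the platform data supplies the masks/shifts and the ts_shift
 * table that maps the field value to a log2 transfer size.
 */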
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

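/*
 * Inverse of calc_xmit_shift(): find the TS field value for a given log2
 * transfer size and place it into the low/high CHCR bit ranges.
 */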
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

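/*
 * Program one hardware descriptor: source, destination and transfer count.
 * TCR counts transfer units, not bytes, hence the xmit_shift scaling.
 */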
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

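/*
 * DMARS selects the peripheral (MID/RID) that drives this channel's DMA
 * requests; each channel owns an 8-bit field whose register offset and bit
 * position come from the platform data.
 */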
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = (u16 __iomem *)shdev->chan_reg;
	addr += chan_pdata->dmars / sizeof(u16);

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

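	/*
	 * Allocate the next cookie: cookies stay positive, negative values
	 * are reserved for error codes, so wrap back to 1 on overflow.
	 */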
	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;
	int ret;

	pm_runtime_get_sync(sh_chan->dev);

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			ret = -EINVAL;
			goto efindslave;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (!sh_chan->descs_allocated) {
		ret = -ENOMEM;
		goto edescalloc;
	}

	return sh_chan->descs_allocated;

edescalloc:
	if (param)
		clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
	pm_runtime_put(sh_chan->dev);
	return ret;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);
	int descs = sh_chan->descs_allocated;

	/* Protect against ISR */
	spin_lock_irq(&sh_chan->desc_lock);
	dmae_halt(sh_chan);
	spin_unlock_irq(&sh_chan->desc_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
		chan->private = NULL;
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (descs > 0)
		pm_runtime_put(sh_chan->dev);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns the prepared descriptor on success or NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_BIDIRECTIONAL and the SG list contains only one element and points at
 * the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

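	/*
	 * Count the hardware descriptors (chunks) needed: a single descriptor
	 * can move at most SH_DMA_TCR_MAX + 1 bytes, so large SG entries are
	 * split across several chunks.
	 */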
	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * first descriptor is what the user is dealing with in all API calls,
	 *	its cookie is at first set to -EBUSY, at tx-submit to a
	 *	positive number
	 * if more than one chunk is needed, further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	slave_addr = param->config->addr;

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}

static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!chan)
		return -EINVAL;

	spin_lock_bh(&sh_chan->desc_lock);
	dmae_halt(sh_chan);

	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;

	}
	spin_unlock_bh(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, true);

	return 0;
}

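/*
 * Walk ld_queue once, retiring completed descriptors and updating chunk
 * state. Returns the callback of a newly completed chain, if any, so the
 * caller keeps calling until this function returns NULL.
 */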
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		sh_chan->completed_cookie = sh_chan->common.cookie;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of a DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan))
		goto sh_chan_xfer_ld_queue_end;

	/* Find the first descriptor that has not been transferred yet */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

sh_chan_xfer_ld_queue_end:
	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	/* First read completed cookie to avoid a skew */
	last_complete = sh_chan->completed_cookie;
	rmb();
	last_used = chan->cookie;
	BUG_ON(last_complete < 0);
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and we
	 * have to report an error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = data;
	u32 chcr;

	spin_lock(&sh_chan->desc_lock);

	chcr = chcr_read(sh_chan);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	spin_unlock(&sh_chan->desc_lock);

	return ret;
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	unsigned int handled = 0;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so we have to reset them all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_desc *desc;
		LIST_HEAD(dl);

		if (!sh_chan)
			continue;

		spin_lock(&sh_chan->desc_lock);

		/* Stop the channel */
		dmae_halt(sh_chan);

		list_splice_init(&sh_chan->ld_queue, &dl);

		spin_unlock(&sh_chan->desc_lock);

| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 917 | /* Complete all detached descriptors */ | 
| Guennadi Liakhovetski | 2dc6666 | 2011-04-29 17:09:21 +0000 | [diff] [blame] | 918 | list_for_each_entry(desc, &dl, node) { | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 919 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | 
|  | 920 | desc->mark = DESC_IDLE; | 
|  | 921 | if (tx->callback) | 
|  | 922 | tx->callback(tx->callback_param); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 923 | } | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 924 |  | 
| Guennadi Liakhovetski | 2dc6666 | 2011-04-29 17:09:21 +0000 | [diff] [blame] | 925 | spin_lock(&sh_chan->desc_lock); | 
|  | 926 | list_splice(&dl, &sh_chan->ld_free); | 
|  | 927 | spin_unlock(&sh_chan->desc_lock); | 
|  | 928 |  | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 929 | handled++; | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 930 | } | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 931 |  | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 932 | sh_dmae_rst(shdev); | 
| Guennadi Liakhovetski | 47a4dc2 | 2010-02-11 16:50:05 +0000 | [diff] [blame] | 933 |  | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 934 | return !!handled; | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 935 | } | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 936 |  | 
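|  |  | /* Address-error IRQ: if DMAOR_AE is set, reset the whole controller */ | 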
|  | 937 | static irqreturn_t sh_dmae_err(int irq, void *data) | 
|  | 938 | { | 
| Yoshihiro Shimoda | ff7690b | 2011-02-09 07:46:47 +0000 | [diff] [blame] | 939 | struct sh_dmae_device *shdev = data; | 
|  | 940 |  | 
| Guennadi Liakhovetski | 2dc6666 | 2011-04-29 17:09:21 +0000 | [diff] [blame] | 941 | if (!(dmaor_read(shdev) & DMAOR_AE)) | 
| Yoshihiro Shimoda | ff7690b | 2011-02-09 07:46:47 +0000 | [diff] [blame] | 942 | return IRQ_NONE; | 
| Guennadi Liakhovetski | 2dc6666 | 2011-04-29 17:09:21 +0000 | [diff] [blame] | 943 |  | 
|  | 944 | sh_dmae_reset(shdev); | 
|  | 945 | return IRQ_HANDLED; | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 946 | } | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 947 |  | 
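|  |  | /* | 
|  |  |  * Channel tasklet: match the current SAR/DAR against the submitted | 
|  |  |  * descriptors to find the one that just finished, mark it completed, | 
|  |  |  * then start the next transfer and run the cleanup pass. | 
|  |  |  */ | 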
|  | 948 | static void dmae_do_tasklet(unsigned long data) | 
|  | 949 | { | 
|  | 950 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; | 
| Guennadi Liakhovetski | 3542a11 | 2009-12-17 09:41:39 -0700 | [diff] [blame] | 951 | struct sh_desc *desc; | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 952 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); | 
| Guennadi Liakhovetski | cfefe99 | 2010-02-03 14:46:41 +0000 | [diff] [blame] | 953 | u32 dar_buf = sh_dmae_readl(sh_chan, DAR); | 
| Guennadi Liakhovetski | 86d61b3 | 2009-12-10 18:35:07 +0100 | [diff] [blame] | 954 |  | 
| Guennadi Liakhovetski | 3542a11 | 2009-12-17 09:41:39 -0700 | [diff] [blame] | 955 | spin_lock(&sh_chan->desc_lock); | 
|  | 956 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { | 
| Guennadi Liakhovetski | cfefe99 | 2010-02-03 14:46:41 +0000 | [diff] [blame] | 957 | if (desc->mark == DESC_SUBMITTED && | 
|  | 958 | ((desc->direction == DMA_FROM_DEVICE && | 
|  | 959 | (desc->hw.dar + desc->hw.tcr) == dar_buf) || | 
|  | 960 | (desc->hw.sar + desc->hw.tcr) == sar_buf)) { | 
| Guennadi Liakhovetski | 3542a11 | 2009-12-17 09:41:39 -0700 | [diff] [blame] | 961 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", | 
|  | 962 | desc->async_tx.cookie, &desc->async_tx, | 
|  | 963 | desc->hw.dar); | 
|  | 964 | desc->mark = DESC_COMPLETED; | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 965 | break; | 
|  | 966 | } | 
|  | 967 | } | 
| Guennadi Liakhovetski | 3542a11 | 2009-12-17 09:41:39 -0700 | [diff] [blame] | 968 | spin_unlock(&sh_chan->desc_lock); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 969 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 970 | /* Next desc */ | 
|  | 971 | sh_chan_xfer_ld_queue(sh_chan); | 
| Guennadi Liakhovetski | 3542a11 | 2009-12-17 09:41:39 -0700 | [diff] [blame] | 972 | sh_dmae_chan_ld_cleanup(sh_chan, false); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 973 | } | 
|  | 974 |  | 
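|  |  | /* Reset the controller if its DMAOR_NMIF flag is asserted; returns true if handled */ | 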
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 975 | static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) | 
|  | 976 | { | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 977 | /* Fast path out if NMIF is not asserted for this controller */ | 
|  | 978 | if ((dmaor_read(shdev) & DMAOR_NMIF) == 0) | 
|  | 979 | return false; | 
|  | 980 |  | 
| Guennadi Liakhovetski | 2dc6666 | 2011-04-29 17:09:21 +0000 | [diff] [blame] | 981 | return sh_dmae_reset(shdev); | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 982 | } | 
|  | 983 |  | 
|  | 984 | static int sh_dmae_nmi_handler(struct notifier_block *self, | 
|  | 985 | unsigned long cmd, void *data) | 
|  | 986 | { | 
|  | 987 | struct sh_dmae_device *shdev; | 
|  | 988 | int ret = NOTIFY_DONE; | 
|  | 989 | bool triggered; | 
|  | 990 |  | 
|  | 991 | /* | 
|  | 992 | * Only concern ourselves with NMI events. | 
|  | 993 | * | 
|  | 994 | * Normally we would check the die chain value, but as this needs | 
|  | 995 | * to be architecture independent, check for NMI context instead. | 
|  | 996 | */ | 
|  | 997 | if (!in_nmi()) | 
|  | 998 | return NOTIFY_DONE; | 
|  | 999 |  | 
|  | 1000 | rcu_read_lock(); | 
|  | 1001 | list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) { | 
|  | 1002 | /* | 
|  | 1003 | * Only stop if one of the controllers has NMIF asserted, | 
|  | 1004 | * we do not want to interfere with regular address error | 
|  | 1005 | * handling or NMI events that don't concern the DMACs. | 
|  | 1006 | */ | 
|  | 1007 | triggered = sh_dmae_nmi_notify(shdev); | 
|  | 1008 | if (triggered) | 
|  | 1009 | ret = NOTIFY_OK; | 
|  | 1010 | } | 
|  | 1011 | rcu_read_unlock(); | 
|  | 1012 |  | 
|  | 1013 | return ret; | 
|  | 1014 | } | 
|  | 1015 |  | 
|  | 1016 | static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { | 
|  | 1017 | .notifier_call	= sh_dmae_nmi_handler, | 
|  | 1018 |  | 
|  | 1019 | /* Run before NMI debug handler and KGDB */ | 
|  | 1020 | .priority	= 1, | 
|  | 1021 | }; | 
|  | 1022 |  | 
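|  |  | /* | 
|  |  |  * Allocate and register one DMA channel: map its register block via the | 
|  |  |  * platform channel offset, hook up its IRQ and add it to the dmaengine | 
|  |  |  * channel list. | 
|  |  |  */ | 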
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1023 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, | 
|  | 1024 | int irq, unsigned long flags) | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1025 | { | 
|  | 1026 | int err; | 
| Guennadi Liakhovetski | 5bac942 | 2010-04-21 15:36:49 +0000 | [diff] [blame] | 1027 | const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1028 | struct platform_device *pdev = to_platform_device(shdev->common.dev); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1029 | struct sh_dmae_chan *new_sh_chan; | 
|  | 1030 |  | 
|  | 1031 | /* alloc channel */ | 
|  | 1032 | new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); | 
|  | 1033 | if (!new_sh_chan) { | 
| Guennadi Liakhovetski | 86d61b3 | 2009-12-10 18:35:07 +0100 | [diff] [blame] | 1034 | dev_err(shdev->common.dev, | 
|  | 1035 | "No free memory for allocating dma channels!\n"); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1036 | return -ENOMEM; | 
|  | 1037 | } | 
|  | 1038 |  | 
| Guennadi Liakhovetski | 8b1935e | 2010-02-11 16:50:14 +0000 | [diff] [blame] | 1039 | /* reference the shared struct dma_device */ | 
|  | 1040 | new_sh_chan->common.device = &shdev->common; | 
|  | 1041 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1042 | new_sh_chan->dev = shdev->common.dev; | 
|  | 1043 | new_sh_chan->id = id; | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1044 | new_sh_chan->irq = irq; | 
|  | 1045 | new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1046 |  | 
|  | 1047 | /* Init DMA tasklet */ | 
|  | 1048 | tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, | 
|  | 1049 | (unsigned long)new_sh_chan); | 
|  | 1050 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1051 | spin_lock_init(&new_sh_chan->desc_lock); | 
|  | 1052 |  | 
|  | 1053 | /* Init descriptor management lists */ | 
|  | 1054 | INIT_LIST_HEAD(&new_sh_chan->ld_queue); | 
|  | 1055 | INIT_LIST_HEAD(&new_sh_chan->ld_free); | 
|  | 1056 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1057 | /* Add the channel to DMA device channel list */ | 
|  | 1058 | list_add_tail(&new_sh_chan->common.device_node, | 
|  | 1059 | &shdev->common.channels); | 
|  | 1060 | shdev->common.chancnt++; | 
|  | 1061 |  | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1062 | if (pdev->id >= 0) | 
|  | 1063 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), | 
|  | 1064 | "sh-dmae%d.%d", pdev->id, new_sh_chan->id); | 
|  | 1065 | else | 
|  | 1066 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), | 
|  | 1067 | "sh-dma%d", new_sh_chan->id); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1068 |  | 
|  | 1069 | /* set up channel irq */ | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1070 | err = request_irq(irq, &sh_dmae_interrupt, flags, | 
| Guennadi Liakhovetski | 86d61b3 | 2009-12-10 18:35:07 +0100 | [diff] [blame] | 1071 | new_sh_chan->dev_id, new_sh_chan); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1072 | if (err) { | 
|  | 1073 | dev_err(shdev->common.dev, "DMA channel %d request_irq failed " | 
|  | 1074 | "with error %d\n", id, err); | 
|  | 1075 | goto err_no_irq; | 
|  | 1076 | } | 
|  | 1077 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1078 | shdev->chan[id] = new_sh_chan; | 
|  | 1079 | return 0; | 
|  | 1080 |  | 
|  | 1081 | err_no_irq: | 
|  | 1082 | /* remove from dmaengine device node */ | 
|  | 1083 | list_del(&new_sh_chan->common.device_node); | 
|  | 1084 | kfree(new_sh_chan); | 
|  | 1085 | return err; | 
|  | 1086 | } | 
|  | 1087 |  | 
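|  |  | /* Tear down all channels set up by sh_dmae_chan_probe() */ | 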
|  | 1088 | static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) | 
|  | 1089 | { | 
|  | 1090 | int i; | 
|  | 1091 |  | 
|  | 1092 | for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { | 
|  | 1093 | if (shdev->chan[i]) { | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1094 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1095 |  | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1096 | free_irq(sh_chan->irq, sh_chan); | 
|  | 1097 |  | 
|  | 1098 | list_del(&sh_chan->common.device_node); | 
|  | 1099 | kfree(sh_chan); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1100 | shdev->chan[i] = NULL; | 
|  | 1101 | } | 
|  | 1102 | } | 
|  | 1103 | shdev->common.chancnt = 0; | 
|  | 1104 | } | 
|  | 1105 |  | 
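|  |  | /* | 
|  |  |  * Probe: map the register (and optional DMARS) regions, reset the | 
|  |  |  * controller, parse the IRQ resources described below, create one | 
|  |  |  * sh_dmae_chan per usable IRQ and register the dmaengine device. | 
|  |  |  */ | 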
|  | 1106 | static int __init sh_dmae_probe(struct platform_device *pdev) | 
|  | 1107 | { | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1108 | struct sh_dmae_pdata *pdata = pdev->dev.platform_data; | 
|  | 1109 | unsigned long irqflags = IRQF_DISABLED, | 
| Guennadi Liakhovetski | 8b1935e | 2010-02-11 16:50:14 +0000 | [diff] [blame] | 1110 | chan_flag[SH_DMAC_MAX_CHANNELS] = {}; | 
|  | 1111 | int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; | 
| Magnus Damm | 300e5f9 | 2011-05-24 10:31:20 +0000 | [diff] [blame] | 1112 | int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1113 | struct sh_dmae_device *shdev; | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1114 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1115 |  | 
| Dan Williams | 56adf7e | 2009-11-22 12:10:10 -0700 | [diff] [blame] | 1116 | /* get platform data */ | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1117 | if (!pdata || !pdata->channel_num) | 
| Dan Williams | 56adf7e | 2009-11-22 12:10:10 -0700 | [diff] [blame] | 1118 | return -ENODEV; | 
|  | 1119 |  | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1120 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 
| Magnus Damm | 26fc02a | 2011-05-24 10:31:12 +0000 | [diff] [blame] | 1121 | /* DMARS area is optional */ | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1122 | dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 
|  | 1123 | /* | 
|  | 1124 | * IRQ resources: | 
|  | 1125 | * 1. there always must be at least one IRQ IO-resource. On SH4 it is | 
|  | 1126 | *    the error IRQ, in which case it is the only IRQ in this resource: | 
|  | 1127 | *    start == end. If it is the only IRQ resource, all channels also | 
|  | 1128 | *    use the same IRQ. | 
|  | 1129 | * 2. DMA channel IRQ resources can be specified one per resource or in | 
|  | 1130 | *    ranges (start != end) | 
|  | 1131 | * 3. iff all events (channels and, optionally, error) on this | 
|  | 1132 | *    controller use the same IRQ, only one IRQ resource can be | 
|  | 1133 | *    specified, otherwise there must be one IRQ per channel, even if | 
|  | 1134 | *    some of them are equal | 
|  | 1135 | * 4. if all IRQs on this controller are equal or if some specific IRQs | 
|  | 1136 | *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be | 
|  | 1137 | *    requested with the IRQF_SHARED flag | 
|  | 1138 | */ | 
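|  |  | /* | 
|  |  |  * Illustrative (hypothetical) example of cases 1 and 3 above: a board with | 
|  |  |  * six channels multiplexed onto one interrupt would pass a single | 
|  |  |  * IORESOURCE_IRQ with start == end and no further IRQ resources, and all | 
|  |  |  * channels would then be requested with IRQF_SHARED. | 
|  |  |  */ | 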
|  | 1139 | errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 
|  | 1140 | if (!chan || !errirq_res) | 
|  | 1141 | return -ENODEV; | 
|  | 1142 |  | 
|  | 1143 | if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { | 
|  | 1144 | dev_err(&pdev->dev, "DMAC register region already claimed\n"); | 
|  | 1145 | return -EBUSY; | 
|  | 1146 | } | 
|  | 1147 |  | 
|  | 1148 | if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { | 
|  | 1149 | dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); | 
|  | 1150 | err = -EBUSY; | 
|  | 1151 | goto ermrdmars; | 
|  | 1152 | } | 
|  | 1153 |  | 
|  | 1154 | err = -ENOMEM; | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1155 | shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); | 
|  | 1156 | if (!shdev) { | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1157 | dev_err(&pdev->dev, "Not enough memory\n"); | 
|  | 1158 | goto ealloc; | 
|  | 1159 | } | 
|  | 1160 |  | 
|  | 1161 | shdev->chan_reg = ioremap(chan->start, resource_size(chan)); | 
|  | 1162 | if (!shdev->chan_reg) | 
|  | 1163 | goto emapchan; | 
|  | 1164 | if (dmars) { | 
|  | 1165 | shdev->dmars = ioremap(dmars->start, resource_size(dmars)); | 
|  | 1166 | if (!shdev->dmars) | 
|  | 1167 | goto emapdmars; | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1168 | } | 
|  | 1169 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1170 | /* platform data */ | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1171 | shdev->pdata = pdata; | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1172 |  | 
| Kuninori Morimoto | 5899a72 | 2011-06-17 08:20:40 +0000 | [diff] [blame] | 1173 | if (pdata->chcr_offset) | 
|  | 1174 | shdev->chcr_offset = pdata->chcr_offset; | 
|  | 1175 | else | 
|  | 1176 | shdev->chcr_offset = CHCR; | 
|  | 1177 |  | 
| Kuninori Morimoto | 67c6269 | 2011-06-17 08:20:51 +0000 | [diff] [blame] | 1178 | if (pdata->chcr_ie_bit) | 
|  | 1179 | shdev->chcr_ie_bit = pdata->chcr_ie_bit; | 
|  | 1180 | else | 
|  | 1181 | shdev->chcr_ie_bit = CHCR_IE; | 
|  | 1182 |  | 
| Paul Mundt | 5c2de44 | 2011-05-31 15:53:03 +0900 | [diff] [blame] | 1183 | platform_set_drvdata(pdev, shdev); | 
|  | 1184 |  | 
| Guennadi Liakhovetski | 20f2a3b | 2010-02-11 16:50:18 +0000 | [diff] [blame] | 1185 | pm_runtime_enable(&pdev->dev); | 
|  | 1186 | pm_runtime_get_sync(&pdev->dev); | 
|  | 1187 |  | 
| Guennadi Liakhovetski | 31705e2 | 2011-05-02 07:59:02 +0000 | [diff] [blame] | 1188 | spin_lock_irq(&sh_dmae_lock); | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 1189 | list_add_tail_rcu(&shdev->node, &sh_dmae_devices); | 
| Guennadi Liakhovetski | 31705e2 | 2011-05-02 07:59:02 +0000 | [diff] [blame] | 1190 | spin_unlock_irq(&sh_dmae_lock); | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 1191 |  | 
| Guennadi Liakhovetski | 2dc6666 | 2011-04-29 17:09:21 +0000 | [diff] [blame] | 1192 | /* reset dma controller - only needed as a test */ | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1193 | err = sh_dmae_rst(shdev); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1194 | if (err) | 
|  | 1195 | goto rst_err; | 
|  | 1196 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1197 | INIT_LIST_HEAD(&shdev->common.channels); | 
|  | 1198 |  | 
|  | 1199 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); | 
| Magnus Damm | 26fc02a | 2011-05-24 10:31:12 +0000 | [diff] [blame] | 1200 | if (pdata->slave && pdata->slave_num) | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1201 | dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); | 
| Guennadi Liakhovetski | cfefe99 | 2010-02-03 14:46:41 +0000 | [diff] [blame] | 1202 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1203 | shdev->common.device_alloc_chan_resources | 
|  | 1204 | = sh_dmae_alloc_chan_resources; | 
|  | 1205 | shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; | 
|  | 1206 | shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; | 
| Linus Walleij | 0793448 | 2010-03-26 16:50:49 -0700 | [diff] [blame] | 1207 | shdev->common.device_tx_status = sh_dmae_tx_status; | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1208 | shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; | 
| Guennadi Liakhovetski | cfefe99 | 2010-02-03 14:46:41 +0000 | [diff] [blame] | 1209 |  | 
|  | 1210 | /* Compulsory fields for DMA_SLAVE */ | 
|  | 1211 | shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; | 
| Linus Walleij | c3635c7 | 2010-03-26 16:44:01 -0700 | [diff] [blame] | 1212 | shdev->common.device_control = sh_dmae_control; | 
| Guennadi Liakhovetski | cfefe99 | 2010-02-03 14:46:41 +0000 | [diff] [blame] | 1213 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1214 | shdev->common.dev = &pdev->dev; | 
| Guennadi Liakhovetski | ddb4f0f | 2009-12-04 19:44:41 +0100 | [diff] [blame] | 1215 | /* Default transfer size of 4 bytes requires 4-byte alignment */ | 
| Guennadi Liakhovetski | 8b1935e | 2010-02-11 16:50:14 +0000 | [diff] [blame] | 1216 | shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1217 |  | 
| Magnus Damm | 927a7c9 | 2010-03-19 04:47:19 +0000 | [diff] [blame] | 1218 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1219 | chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); | 
|  | 1220 |  | 
|  | 1221 | if (!chanirq_res) | 
|  | 1222 | chanirq_res = errirq_res; | 
|  | 1223 | else | 
|  | 1224 | irqres++; | 
|  | 1225 |  | 
|  | 1226 | if (chanirq_res == errirq_res || | 
|  | 1227 | (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1228 | irqflags = IRQF_SHARED; | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1229 |  | 
|  | 1230 | errirq = errirq_res->start; | 
|  | 1231 |  | 
|  | 1232 | err = request_irq(errirq, sh_dmae_err, irqflags, | 
|  | 1233 | "DMAC Address Error", shdev); | 
|  | 1234 | if (err) { | 
|  | 1235 | dev_err(&pdev->dev, | 
|  | 1236 | "DMA failed requesting irq #%d, error %d\n", | 
|  | 1237 | errirq, err); | 
|  | 1238 | goto eirq_err; | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1239 | } | 
|  | 1240 |  | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1241 | #else | 
|  | 1242 | chanirq_res = errirq_res; | 
| Magnus Damm | 927a7c9 | 2010-03-19 04:47:19 +0000 | [diff] [blame] | 1243 | #endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */ | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1244 |  | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1245 | if (chanirq_res->start == chanirq_res->end && | 
|  | 1246 | !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { | 
|  | 1247 | /* Special case - all multiplexed */ | 
|  | 1248 | for (; irq_cnt < pdata->channel_num; irq_cnt++) { | 
| Magnus Damm | 300e5f9 | 2011-05-24 10:31:20 +0000 | [diff] [blame] | 1249 | if (irq_cnt < SH_DMAC_MAX_CHANNELS) { | 
|  | 1250 | chan_irq[irq_cnt] = chanirq_res->start; | 
|  | 1251 | chan_flag[irq_cnt] = IRQF_SHARED; | 
|  | 1252 | } else { | 
|  | 1253 | irq_cap = 1; | 
|  | 1254 | break; | 
|  | 1255 | } | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1256 | } | 
|  | 1257 | } else { | 
|  | 1258 | do { | 
|  | 1259 | for (i = chanirq_res->start; i <= chanirq_res->end; i++) { | 
| Magnus Damm | dcee0bb | 2011-06-09 06:35:08 +0000 | [diff] [blame] | 1260 | if (irq_cnt >= SH_DMAC_MAX_CHANNELS) { | 
|  | 1261 | irq_cap = 1; | 
|  | 1262 | break; | 
|  | 1263 | } | 
|  | 1264 |  | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1265 | if ((errirq_res->flags & IORESOURCE_BITS) == | 
|  | 1266 | IORESOURCE_IRQ_SHAREABLE) | 
|  | 1267 | chan_flag[irq_cnt] = IRQF_SHARED; | 
|  | 1268 | else | 
|  | 1269 | chan_flag[irq_cnt] = IRQF_DISABLED; | 
|  | 1270 | dev_dbg(&pdev->dev, | 
|  | 1271 | "Found IRQ %d for channel %d\n", | 
|  | 1272 | i, irq_cnt); | 
|  | 1273 | chan_irq[irq_cnt++] = i; | 
| Magnus Damm | 300e5f9 | 2011-05-24 10:31:20 +0000 | [diff] [blame] | 1274 | } | 
|  | 1275 |  | 
| Magnus Damm | dcee0bb | 2011-06-09 06:35:08 +0000 | [diff] [blame] | 1276 | if (irq_cnt >= SH_DMAC_MAX_CHANNELS) | 
| Magnus Damm | 300e5f9 | 2011-05-24 10:31:20 +0000 | [diff] [blame] | 1277 | break; | 
| Magnus Damm | dcee0bb | 2011-06-09 06:35:08 +0000 | [diff] [blame] | 1278 |  | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1279 | chanirq_res = platform_get_resource(pdev, | 
|  | 1280 | IORESOURCE_IRQ, ++irqres); | 
|  | 1281 | } while (irq_cnt < pdata->channel_num && chanirq_res); | 
|  | 1282 | } | 
|  | 1283 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1284 | /* Create DMA Channel */ | 
| Magnus Damm | 300e5f9 | 2011-05-24 10:31:20 +0000 | [diff] [blame] | 1285 | for (i = 0; i < irq_cnt; i++) { | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1286 | err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1287 | if (err) | 
|  | 1288 | goto chan_probe_err; | 
|  | 1289 | } | 
|  | 1290 |  | 
| Magnus Damm | 300e5f9 | 2011-05-24 10:31:20 +0000 | [diff] [blame] | 1291 | if (irq_cap) | 
|  | 1292 | dev_notice(&pdev->dev, "Attempting to register %d DMA " | 
|  | 1293 | "channels when a maximum of %d are supported.\n", | 
|  | 1294 | pdata->channel_num, SH_DMAC_MAX_CHANNELS); | 
|  | 1295 |  | 
| Guennadi Liakhovetski | 20f2a3b | 2010-02-11 16:50:18 +0000 | [diff] [blame] | 1296 | pm_runtime_put(&pdev->dev); | 
|  | 1297 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1298 | dma_async_device_register(&shdev->common); | 
|  | 1299 |  | 
|  | 1300 | return err; | 
|  | 1301 |  | 
|  | 1302 | chan_probe_err: | 
|  | 1303 | sh_dmae_chan_remove(shdev); | 
| Magnus Damm | 300e5f9 | 2011-05-24 10:31:20 +0000 | [diff] [blame] | 1304 |  | 
| Magnus Damm | 927a7c9 | 2010-03-19 04:47:19 +0000 | [diff] [blame] | 1305 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1306 | free_irq(errirq, shdev); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1307 | eirq_err: | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1308 | #endif | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1309 | rst_err: | 
| Guennadi Liakhovetski | 31705e2 | 2011-05-02 07:59:02 +0000 | [diff] [blame] | 1310 | spin_lock_irq(&sh_dmae_lock); | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 1311 | list_del_rcu(&shdev->node); | 
| Guennadi Liakhovetski | 31705e2 | 2011-05-02 07:59:02 +0000 | [diff] [blame] | 1312 | spin_unlock_irq(&sh_dmae_lock); | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 1313 |  | 
| Guennadi Liakhovetski | 20f2a3b | 2010-02-11 16:50:18 +0000 | [diff] [blame] | 1314 | pm_runtime_put(&pdev->dev); | 
| Guennadi Liakhovetski | 467017b | 2011-04-29 17:09:25 +0000 | [diff] [blame] | 1315 | pm_runtime_disable(&pdev->dev); | 
|  | 1316 |  | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1317 | if (dmars) | 
|  | 1318 | iounmap(shdev->dmars); | 
| Paul Mundt | 5c2de44 | 2011-05-31 15:53:03 +0900 | [diff] [blame] | 1319 |  | 
|  | 1320 | platform_set_drvdata(pdev, NULL); | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1321 | emapdmars: | 
|  | 1322 | iounmap(shdev->chan_reg); | 
| Guennadi Liakhovetski | 31705e2 | 2011-05-02 07:59:02 +0000 | [diff] [blame] | 1323 | synchronize_rcu(); | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1324 | emapchan: | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1325 | kfree(shdev); | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1326 | ealloc: | 
|  | 1327 | if (dmars) | 
|  | 1328 | release_mem_region(dmars->start, resource_size(dmars)); | 
|  | 1329 | ermrdmars: | 
|  | 1330 | release_mem_region(chan->start, resource_size(chan)); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1331 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1332 | return err; | 
|  | 1333 | } | 
|  | 1334 |  | 
|  | 1335 | static int __exit sh_dmae_remove(struct platform_device *pdev) | 
|  | 1336 | { | 
|  | 1337 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1338 | struct resource *res; | 
|  | 1339 | int errirq = platform_get_irq(pdev, 0); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1340 |  | 
|  | 1341 | dma_async_device_unregister(&shdev->common); | 
|  | 1342 |  | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1343 | if (errirq > 0) | 
|  | 1344 | free_irq(errirq, shdev); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1345 |  | 
| Guennadi Liakhovetski | 31705e2 | 2011-05-02 07:59:02 +0000 | [diff] [blame] | 1346 | spin_lock_irq(&sh_dmae_lock); | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 1347 | list_del_rcu(&shdev->node); | 
| Guennadi Liakhovetski | 31705e2 | 2011-05-02 07:59:02 +0000 | [diff] [blame] | 1348 | spin_unlock_irq(&sh_dmae_lock); | 
| Paul Mundt | 03aa18f | 2010-12-17 19:16:10 +0900 | [diff] [blame] | 1349 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1350 | /* remove channel data */ | 
|  | 1351 | sh_dmae_chan_remove(shdev); | 
|  | 1352 |  | 
| Guennadi Liakhovetski | 20f2a3b | 2010-02-11 16:50:18 +0000 | [diff] [blame] | 1353 | pm_runtime_disable(&pdev->dev); | 
|  | 1354 |  | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1355 | if (shdev->dmars) | 
|  | 1356 | iounmap(shdev->dmars); | 
|  | 1357 | iounmap(shdev->chan_reg); | 
|  | 1358 |  | 
| Paul Mundt | 5c2de44 | 2011-05-31 15:53:03 +0900 | [diff] [blame] | 1359 | platform_set_drvdata(pdev, NULL); | 
|  | 1360 |  | 
| Guennadi Liakhovetski | 31705e2 | 2011-05-02 07:59:02 +0000 | [diff] [blame] | 1361 | synchronize_rcu(); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1362 | kfree(shdev); | 
|  | 1363 |  | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1364 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 
|  | 1365 | if (res) | 
|  | 1366 | release_mem_region(res->start, resource_size(res)); | 
|  | 1367 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 
|  | 1368 | if (res) | 
|  | 1369 | release_mem_region(res->start, resource_size(res)); | 
|  | 1370 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1371 | return 0; | 
|  | 1372 | } | 
|  | 1373 |  | 
|  | 1374 | static void sh_dmae_shutdown(struct platform_device *pdev) | 
|  | 1375 | { | 
|  | 1376 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | 
| Guennadi Liakhovetski | 027811b | 2010-02-11 16:50:10 +0000 | [diff] [blame] | 1377 | sh_dmae_ctl_stop(shdev); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1378 | } | 
|  | 1379 |  | 
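|  |  | /* Runtime PM: nothing to save on suspend; the controller is re-initialised via sh_dmae_rst() on resume */ | 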
| Guennadi Liakhovetski | 467017b | 2011-04-29 17:09:25 +0000 | [diff] [blame] | 1380 | static int sh_dmae_runtime_suspend(struct device *dev) | 
|  | 1381 | { | 
|  | 1382 | return 0; | 
|  | 1383 | } | 
|  | 1384 |  | 
|  | 1385 | static int sh_dmae_runtime_resume(struct device *dev) | 
|  | 1386 | { | 
|  | 1387 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | 
|  | 1388 |  | 
|  | 1389 | return sh_dmae_rst(shdev); | 
|  | 1390 | } | 
|  | 1391 |  | 
|  | 1392 | #ifdef CONFIG_PM | 
|  | 1393 | static int sh_dmae_suspend(struct device *dev) | 
|  | 1394 | { | 
|  | 1395 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | 
|  | 1396 | int i; | 
|  | 1397 |  | 
|  | 1398 | for (i = 0; i < shdev->pdata->channel_num; i++) { | 
|  | 1399 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | 
|  | 1400 | /* chan[] entries may be NULL if fewer channels were registered than channel_num */ | 
|  | 1400 | if (sh_chan && sh_chan->descs_allocated) | 
|  | 1401 | sh_chan->pm_error = pm_runtime_put_sync(dev); | 
|  | 1402 | } | 
|  | 1403 |  | 
|  | 1404 | return 0; | 
|  | 1405 | } | 
|  | 1406 |  | 
|  | 1407 | static int sh_dmae_resume(struct device *dev) | 
|  | 1408 | { | 
|  | 1409 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | 
|  | 1410 | int i; | 
|  | 1411 |  | 
|  | 1412 | for (i = 0; i < shdev->pdata->channel_num; i++) { | 
|  | 1413 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | 
|  | 1414 | struct sh_dmae_slave *param; | 
|  | 1415 |  | 
|  | 1416 | if (!sh_chan || !sh_chan->descs_allocated) | 
|  | 1417 | continue; | 
|  |  |  | 
|  |  | param = sh_chan->common.private; | 
|  | 1418 |  | 
|  | 1419 | if (!sh_chan->pm_error) | 
|  | 1420 | pm_runtime_get_sync(dev); | 
|  | 1421 |  | 
|  | 1422 | if (param) { | 
|  | 1423 | const struct sh_dmae_slave_config *cfg = param->config; | 
|  | 1424 | dmae_set_dmars(sh_chan, cfg->mid_rid); | 
|  | 1425 | dmae_set_chcr(sh_chan, cfg->chcr); | 
|  | 1426 | } else { | 
|  | 1427 | dmae_init(sh_chan); | 
|  | 1428 | } | 
|  | 1429 | } | 
|  | 1430 |  | 
|  | 1431 | return 0; | 
|  | 1432 | } | 
|  | 1433 | #else | 
|  | 1434 | #define sh_dmae_suspend NULL | 
|  | 1435 | #define sh_dmae_resume NULL | 
|  | 1436 | #endif | 
|  | 1437 |  | 
|  | 1438 | static const struct dev_pm_ops sh_dmae_pm = { | 
|  | 1439 | .suspend		= sh_dmae_suspend, | 
|  | 1440 | .resume			= sh_dmae_resume, | 
|  | 1441 | .runtime_suspend	= sh_dmae_runtime_suspend, | 
|  | 1442 | .runtime_resume		= sh_dmae_runtime_resume, | 
|  | 1443 | }; | 
|  | 1444 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1445 | static struct platform_driver sh_dmae_driver = { | 
|  | 1446 | .remove		= __exit_p(sh_dmae_remove), | 
|  | 1447 | .shutdown	= sh_dmae_shutdown, | 
|  | 1448 | .driver = { | 
| Guennadi Liakhovetski | 7a5c106 | 2010-05-21 15:28:51 +0000 | [diff] [blame] | 1449 | .owner	= THIS_MODULE, | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1450 | .name	= "sh-dma-engine", | 
| Guennadi Liakhovetski | 467017b | 2011-04-29 17:09:25 +0000 | [diff] [blame] | 1451 | .pm	= &sh_dmae_pm, | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1452 | }, | 
|  | 1453 | }; | 
|  | 1454 |  | 
|  | 1455 | static int __init sh_dmae_init(void) | 
|  | 1456 | { | 
| Guennadi Liakhovetski | 661382f | 2011-01-06 17:04:50 +0000 | [diff] [blame] | 1457 | /* Wire up NMI handling */ | 
|  | 1458 | int err = register_die_notifier(&sh_dmae_nmi_notifier); | 
|  | 1459 | if (err) | 
|  | 1460 | return err; | 
|  | 1461 |  | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1462 | return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); | 
|  | 1463 | } | 
|  | 1464 | module_init(sh_dmae_init); | 
|  | 1465 |  | 
|  | 1466 | static void __exit sh_dmae_exit(void) | 
|  | 1467 | { | 
|  | 1468 | platform_driver_unregister(&sh_dmae_driver); | 
| Guennadi Liakhovetski | 661382f | 2011-01-06 17:04:50 +0000 | [diff] [blame] | 1469 |  | 
|  | 1470 | unregister_die_notifier(&sh_dmae_nmi_notifier); | 
| Nobuhiro Iwamatsu | d8902ad | 2009-09-07 03:26:23 +0000 | [diff] [blame] | 1471 | } | 
|  | 1472 | module_exit(sh_dmae_exit); | 
|  | 1473 |  | 
|  | 1474 | MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); | 
|  | 1475 | MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); | 
|  | 1476 | MODULE_LICENSE("GPL"); | 
| Guennadi Liakhovetski | e584334 | 2010-11-24 09:48:10 +0000 | [diff] [blame] | 1477 | MODULE_ALIAS("platform:sh-dma-engine"); |