| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1 | /* | 
| Per Forlin | d49278e | 2010-12-20 18:31:38 +0100 | [diff] [blame] | 2 |  * Copyright (C) Ericsson AB 2007-2008 | 
 | 3 |  * Copyright (C) ST-Ericsson SA 2008-2010 | 
| Per Forlin | 661385f | 2010-10-06 09:05:28 +0000 | [diff] [blame] | 4 |  * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson | 
| Jonas Aaberg | 767a967 | 2010-08-09 12:08:34 +0000 | [diff] [blame] | 5 |  * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 6 |  * License terms: GNU General Public License (GPL) version 2 | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 7 |  */ | 
 | 8 |  | 
| Alexey Dobriyan | b7f080c | 2011-06-16 11:01:34 +0000 | [diff] [blame] | 9 | #include <linux/dma-mapping.h> | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 10 | #include <linux/kernel.h> | 
 | 11 | #include <linux/slab.h> | 
| Paul Gortmaker | f492b21 | 2011-07-31 16:17:36 -0400 | [diff] [blame] | 12 | #include <linux/export.h> | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 13 | #include <linux/dmaengine.h> | 
 | 14 | #include <linux/platform_device.h> | 
 | 15 | #include <linux/clk.h> | 
 | 16 | #include <linux/delay.h> | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 17 | #include <linux/pm.h> | 
 | 18 | #include <linux/pm_runtime.h> | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 19 | #include <linux/err.h> | 
| Linus Walleij | f4b8976 | 2011-06-27 11:33:46 +0200 | [diff] [blame] | 20 | #include <linux/amba/bus.h> | 
| Linus Walleij | 15e4b78 | 2012-04-12 18:12:43 +0200 | [diff] [blame] | 21 | #include <linux/regulator/consumer.h> | 
| Linus Walleij | 865fab6 | 2012-10-18 14:20:16 +0200 | [diff] [blame] | 22 | #include <linux/platform_data/dma-ste-dma40.h> | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 23 |  | 
| Russell King - ARM Linux | d2ebfb3 | 2012-03-06 22:34:26 +0000 | [diff] [blame] | 24 | #include "dmaengine.h" | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 25 | #include "ste_dma40_ll.h" | 
 | 26 |  | 
 | 27 | #define D40_NAME "dma40" | 
 | 28 |  | 
 | 29 | #define D40_PHY_CHAN -1 | 
 | 30 |  | 
 | 31 | /* For masking out/in 2 bit channel positions */ | 
 | 32 | #define D40_CHAN_POS(chan)  (2 * (chan / 2)) | 
 | 33 | #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan)) | 
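/*
 * Illustrative example (not from the original source): each physical
 * channel status occupies a 2-bit field in the ACTIVE/ACTIVO registers,
 * so for e.g. physical channel 5, D40_CHAN_POS(5) == 4 and
 * D40_CHAN_POS_MASK(5) == (0x3 << 4) == 0x30. An even/odd channel pair
 * shares the same bit position but lives in different registers; see
 * __d40_execute_command_phy() further down.
 */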
 | 34 |  | 
 | 35 | /* Maximum iterations taken before giving up suspending a channel */ | 
 | 36 | #define D40_SUSPEND_MAX_IT 500 | 
 | 37 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 38 | /* Milliseconds */ | 
 | 39 | #define DMA40_AUTOSUSPEND_DELAY	100 | 
 | 40 |  | 
| Linus Walleij | 508849a | 2010-06-20 21:26:07 +0000 | [diff] [blame] | 41 | /* Hardware requirement on LCLA alignment */ | 
 | 42 | #define LCLA_ALIGNMENT 0x40000 | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 43 |  | 
 | 44 | /* Max number of links per event group */ | 
 | 45 | #define D40_LCLA_LINK_PER_EVENT_GRP 128 | 
 | 46 | #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP | 
 | 47 |  | 
| Linus Walleij | 508849a | 2010-06-20 21:26:07 +0000 | [diff] [blame] | 48 | /* Attempts before giving up on trying to get pages that are aligned */ | 
 | 49 | #define MAX_LCLA_ALLOC_ATTEMPTS 256 | 
 | 50 |  | 
 | 51 | /* Bit markings for allocation map */ | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 52 | #define D40_ALLOC_FREE		(1 << 31) | 
 | 53 | #define D40_ALLOC_PHY		(1 << 30) | 
 | 54 | #define D40_ALLOC_LOG_FREE	0 | 
 | 55 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 56 | /** | 
| 57 |  * enum d40_command - The different commands and/or statuses. | 
 | 58 |  * | 
 | 59 |  * @D40_DMA_STOP: DMA channel command STOP or status STOPPED, | 
| 60 |  * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN. | 
 | 61 |  * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible. | 
 | 62 |  * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED. | 
 | 63 |  */ | 
 | 64 | enum d40_command { | 
 | 65 | 	D40_DMA_STOP		= 0, | 
 | 66 | 	D40_DMA_RUN		= 1, | 
 | 67 | 	D40_DMA_SUSPEND_REQ	= 2, | 
 | 68 | 	D40_DMA_SUSPENDED	= 3 | 
 | 69 | }; | 
 | 70 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 71 | /* | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 72 |  * enum d40_events - The different Event Enables for the event lines. | 
 | 73 |  * | 
 | 74 |  * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan. | 
 | 75 |  * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan. | 
| 76 |  * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line. | 
 | 77 |  * @D40_ROUND_EVENTLINE: Status check for event line. | 
 | 78 |  */ | 
 | 79 |  | 
 | 80 | enum d40_events { | 
 | 81 | 	D40_DEACTIVATE_EVENTLINE	= 0, | 
 | 82 | 	D40_ACTIVATE_EVENTLINE		= 1, | 
 | 83 | 	D40_SUSPEND_REQ_EVENTLINE	= 2, | 
 | 84 | 	D40_ROUND_EVENTLINE		= 3 | 
 | 85 | }; | 
 | 86 |  | 
 | 87 | /* | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 88 |  * These are the registers that have to be saved and later restored | 
 | 89 |  * when the DMA hw is powered off. | 
 | 90 |  * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. | 
 | 91 |  */ | 
 | 92 | static u32 d40_backup_regs[] = { | 
 | 93 | 	D40_DREG_LCPA, | 
 | 94 | 	D40_DREG_LCLA, | 
 | 95 | 	D40_DREG_PRMSE, | 
 | 96 | 	D40_DREG_PRMSO, | 
 | 97 | 	D40_DREG_PRMOE, | 
 | 98 | 	D40_DREG_PRMOO, | 
 | 99 | }; | 
 | 100 |  | 
 | 101 | #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs) | 
 | 102 |  | 
 | 103 | /* TODO: Check if all these registers have to be saved/restored on dma40 v3 */ | 
 | 104 | static u32 d40_backup_regs_v3[] = { | 
 | 105 | 	D40_DREG_PSEG1, | 
 | 106 | 	D40_DREG_PSEG2, | 
 | 107 | 	D40_DREG_PSEG3, | 
 | 108 | 	D40_DREG_PSEG4, | 
 | 109 | 	D40_DREG_PCEG1, | 
 | 110 | 	D40_DREG_PCEG2, | 
 | 111 | 	D40_DREG_PCEG3, | 
 | 112 | 	D40_DREG_PCEG4, | 
 | 113 | 	D40_DREG_RSEG1, | 
 | 114 | 	D40_DREG_RSEG2, | 
 | 115 | 	D40_DREG_RSEG3, | 
 | 116 | 	D40_DREG_RSEG4, | 
 | 117 | 	D40_DREG_RCEG1, | 
 | 118 | 	D40_DREG_RCEG2, | 
 | 119 | 	D40_DREG_RCEG3, | 
 | 120 | 	D40_DREG_RCEG4, | 
 | 121 | }; | 
 | 122 |  | 
 | 123 | #define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3) | 
 | 124 |  | 
 | 125 | static u32 d40_backup_regs_chan[] = { | 
 | 126 | 	D40_CHAN_REG_SSCFG, | 
 | 127 | 	D40_CHAN_REG_SSELT, | 
 | 128 | 	D40_CHAN_REG_SSPTR, | 
 | 129 | 	D40_CHAN_REG_SSLNK, | 
 | 130 | 	D40_CHAN_REG_SDCFG, | 
 | 131 | 	D40_CHAN_REG_SDELT, | 
 | 132 | 	D40_CHAN_REG_SDPTR, | 
 | 133 | 	D40_CHAN_REG_SDLNK, | 
 | 134 | }; | 
 | 135 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 136 | /** | 
 | 137 |  * struct d40_lli_pool - Structure for keeping LLIs in memory | 
 | 138 |  * | 
| 139 |  * @base: Pointer to a memory area used when pre_alloc_lli is not large | 
| 140 |  * enough, i.e. bigger than the most common case of 1 dst and 1 src. NULL if | 
 | 141 |  * pre_alloc_lli is used. | 
| Rabin Vincent | b00f938 | 2011-01-25 11:18:15 +0100 | [diff] [blame] | 142 |  * @dma_addr: DMA address, if mapped | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 143 |  * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. | 
 | 144 |  * @pre_alloc_lli: Pre allocated area for the most common case of transfers, | 
 | 145 |  * one buffer to one buffer. | 
 | 146 |  */ | 
 | 147 | struct d40_lli_pool { | 
 | 148 | 	void	*base; | 
| Linus Walleij | 508849a | 2010-06-20 21:26:07 +0000 | [diff] [blame] | 149 | 	int	 size; | 
| Rabin Vincent | b00f938 | 2011-01-25 11:18:15 +0100 | [diff] [blame] | 150 | 	dma_addr_t	dma_addr; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 151 | 	/* Space for dst and src, plus an extra for padding */ | 
| Linus Walleij | 508849a | 2010-06-20 21:26:07 +0000 | [diff] [blame] | 152 | 	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 153 | }; | 
 | 154 |  | 
 | 155 | /** | 
 | 156 |  * struct d40_desc - A descriptor is one DMA job. | 
 | 157 |  * | 
| 158 |  * @lli_phy: LLI settings for physical channel. Both src and dst | 
| 159 |  * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if | 
 | 160 |  * lli_len equals one. | 
 | 161 |  * @lli_log: Same as above but for logical channels. | 
 | 162 |  * @lli_pool: The pool with two entries pre-allocated. | 
| Per Friden | 941b77a | 2010-06-20 21:24:45 +0000 | [diff] [blame] | 163 |  * @lli_len: Number of llis of current descriptor. | 
| Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 164 |  * @lli_current: Number of transferred llis. | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 165 |  * @lcla_alloc: Number of LCLA entries allocated. | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 166 |  * @txd: DMA engine struct. Used, among other things, for communication | 
 | 167 |  * during a transfer. | 
 | 168 |  * @node: List entry. | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 169 |  * @is_in_client_list: true if the client owns this descriptor. | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 170 |  * @cyclic: true if this is a cyclic job | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 171 |  * | 
 | 172 |  * This descriptor is used for both logical and physical transfers. | 
 | 173 |  */ | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 174 | struct d40_desc { | 
 | 175 | 	/* LLI physical */ | 
 | 176 | 	struct d40_phy_lli_bidir	 lli_phy; | 
 | 177 | 	/* LLI logical */ | 
 | 178 | 	struct d40_log_lli_bidir	 lli_log; | 
 | 179 |  | 
 | 180 | 	struct d40_lli_pool		 lli_pool; | 
| Per Friden | 941b77a | 2010-06-20 21:24:45 +0000 | [diff] [blame] | 181 | 	int				 lli_len; | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 182 | 	int				 lli_current; | 
 | 183 | 	int				 lcla_alloc; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 184 |  | 
 | 185 | 	struct dma_async_tx_descriptor	 txd; | 
 | 186 | 	struct list_head		 node; | 
 | 187 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 188 | 	bool				 is_in_client_list; | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 189 | 	bool				 cyclic; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 190 | }; | 
 | 191 |  | 
 | 192 | /** | 
 | 193 |  * struct d40_lcla_pool - LCLA pool settings and data. | 
 | 194 |  * | 
| Linus Walleij | 508849a | 2010-06-20 21:26:07 +0000 | [diff] [blame] | 195 |  * @base: The virtual address of LCLA. 18 bit aligned. | 
| 196 |  * @base_unaligned: The original kmalloc pointer, if kmalloc is used. | 
 | 197 |  * This pointer is only there for clean-up on error. | 
 | 198 |  * @pages: The number of pages needed for all physical channels. | 
 | 199 |  * Only used later for clean-up on error | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 200 |  * @lock: Lock to protect the content in this struct. | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 201 |  * @alloc_map: Big map of which LCLA entry is owned by which job. | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 202 |  */ | 
 | 203 | struct d40_lcla_pool { | 
 | 204 | 	void		*base; | 
| Rabin Vincent | 026cbc4 | 2011-01-25 11:18:14 +0100 | [diff] [blame] | 205 | 	dma_addr_t	dma_addr; | 
| Linus Walleij | 508849a | 2010-06-20 21:26:07 +0000 | [diff] [blame] | 206 | 	void		*base_unaligned; | 
 | 207 | 	int		 pages; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 208 | 	spinlock_t	 lock; | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 209 | 	struct d40_desc	**alloc_map; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 210 | }; | 
 | 211 |  | 
 | 212 | /** | 
 | 213 |  * struct d40_phy_res - struct for handling eventlines mapped to physical | 
 | 214 |  * channels. | 
 | 215 |  * | 
| 216 |  * @lock: A lock protecting this entity. | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 217 |  * @reserved: True if used by secure world or otherwise. | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 218 |  * @num: The physical channel number of this entity. | 
| 219 |  * @allocated_src: Bit map showing which src event lines are mapped to | 
 | 220 |  * this physical channel. Can also be free or physically allocated. | 
 | 221 |  * @allocated_dst: Same as for src but is dst. | 
| 222 |  * allocated_dst and allocated_src use the D40_ALLOC* defines as well as | 
| Jonas Aaberg | 767a967 | 2010-08-09 12:08:34 +0000 | [diff] [blame] | 223 |  * event line number. | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 224 |  */ | 
 | 225 | struct d40_phy_res { | 
 | 226 | 	spinlock_t lock; | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 227 | 	bool	   reserved; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 228 | 	int	   num; | 
 | 229 | 	u32	   allocated_src; | 
 | 230 | 	u32	   allocated_dst; | 
 | 231 | }; | 
 | 232 |  | 
 | 233 | struct d40_base; | 
 | 234 |  | 
 | 235 | /** | 
 | 236 |  * struct d40_chan - Struct that describes a channel. | 
 | 237 |  * | 
 | 238 |  * @lock: A spinlock to protect this struct. | 
| 239 |  * @log_num: The logical number, if any, of this channel. | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 240 |  * @pending_tx: The number of pending transfers. Used between interrupt handler | 
 | 241 |  * and tasklet. | 
 | 242 |  * @busy: Set to true when transfer is ongoing on this channel. | 
| Jonas Aaberg | 2a61434 | 2010-06-20 21:25:24 +0000 | [diff] [blame] | 243 |  * @phy_chan: Pointer to physical channel which this instance runs on. If this | 
| 244 |  * pointer is NULL, then the channel is not allocated. | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 245 |  * @chan: DMA engine handle. | 
 | 246 |  * @tasklet: Tasklet that gets scheduled from interrupt context to complete a | 
 | 247 |  * transfer and call client callback. | 
| 248 |  * @client: Client owned descriptor list. | 
| Per Forlin | da063d2 | 2011-08-29 13:33:32 +0200 | [diff] [blame] | 249 |  * @pending_queue: Submitted jobs, to be issued by issue_pending() | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 250 |  * @active: Active descriptor. | 
 | 251 |  * @queue: Queued jobs. | 
| Per Forlin | 82babbb36 | 2011-08-29 13:33:35 +0200 | [diff] [blame] | 252 |  * @prepare_queue: Prepared jobs. | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 253 |  * @dma_cfg: The client configuration of this dma channel. | 
| Rabin Vincent | ce2ca12 | 2010-10-12 13:00:49 +0000 | [diff] [blame] | 254 |  * @configured: whether the dma_cfg configuration is valid | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 255 |  * @base: Pointer to the device instance struct. | 
 | 256 |  * @src_def_cfg: Default cfg register setting for src. | 
 | 257 |  * @dst_def_cfg: Default cfg register setting for dst. | 
 | 258 |  * @log_def: Default logical channel settings. | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 259 |  * @lcpa: Pointer to dst and src lcpa settings. | 
| om prakash | ae752bf | 2011-06-27 11:33:31 +0200 | [diff] [blame] | 260 |  * @runtime_addr: runtime configured address. | 
 | 261 |  * @runtime_direction: runtime configured direction. | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 262 |  * | 
 | 263 |  * This struct can either "be" a logical or a physical channel. | 
 | 264 |  */ | 
 | 265 | struct d40_chan { | 
 | 266 | 	spinlock_t			 lock; | 
 | 267 | 	int				 log_num; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 268 | 	int				 pending_tx; | 
 | 269 | 	bool				 busy; | 
 | 270 | 	struct d40_phy_res		*phy_chan; | 
 | 271 | 	struct dma_chan			 chan; | 
 | 272 | 	struct tasklet_struct		 tasklet; | 
 | 273 | 	struct list_head		 client; | 
| Per Forlin | a8f3067 | 2011-06-26 23:29:52 +0200 | [diff] [blame] | 274 | 	struct list_head		 pending_queue; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 275 | 	struct list_head		 active; | 
 | 276 | 	struct list_head		 queue; | 
| Per Forlin | 82babbb36 | 2011-08-29 13:33:35 +0200 | [diff] [blame] | 277 | 	struct list_head		 prepare_queue; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 278 | 	struct stedma40_chan_cfg	 dma_cfg; | 
| Rabin Vincent | ce2ca12 | 2010-10-12 13:00:49 +0000 | [diff] [blame] | 279 | 	bool				 configured; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 280 | 	struct d40_base			*base; | 
 | 281 | 	/* Default register configurations */ | 
 | 282 | 	u32				 src_def_cfg; | 
 | 283 | 	u32				 dst_def_cfg; | 
 | 284 | 	struct d40_def_lcsp		 log_def; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 285 | 	struct d40_log_lli_full		*lcpa; | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 286 | 	/* Runtime reconfiguration */ | 
 | 287 | 	dma_addr_t			runtime_addr; | 
| Vinod Koul | db8196d | 2011-10-13 22:34:23 +0530 | [diff] [blame] | 288 | 	enum dma_transfer_direction	runtime_direction; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 289 | }; | 
 | 290 |  | 
 | 291 | /** | 
| 292 |  * struct d40_base - The big global struct, one for each probed instance. | 
 | 293 |  * | 
| 294 |  * @interrupt_lock: Lock used to make sure one interrupt is handled at a time. | 
 | 295 |  * @execmd_lock: Lock for execute command usage since several channels share | 
 | 296 |  * the same physical register. | 
 | 297 |  * @dev: The device structure. | 
| 298 |  * @virtbase: The virtual base address of the DMA's registers. | 
| Linus Walleij | f418559 | 2010-06-22 18:06:42 -0700 | [diff] [blame] | 299 |  * @rev: silicon revision detected. | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 300 |  * @clk: Pointer to the DMA clock structure. | 
 | 301 |  * @phy_start: Physical memory start of the DMA registers. | 
 | 302 |  * @phy_size: Size of the DMA register map. | 
 | 303 |  * @irq: The IRQ number. | 
 | 304 |  * @num_phy_chans: The number of physical channels. Read from HW. This | 
 | 305 |  * is the number of available channels for this driver, not counting "Secure | 
 | 306 |  * mode" allocated physical channels. | 
 | 307 |  * @num_log_chans: The number of logical channels. Calculated from | 
 | 308 |  * num_phy_chans. | 
 | 309 |  * @dma_both: dma_device channels that can do both memcpy and slave transfers. | 
| 310 |  * @dma_slave: dma_device channels that can only do slave transfers. | 
| 311 |  * @dma_memcpy: dma_device channels that can only do memcpy transfers. | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 312 |  * @phy_chans: Room for all possible physical channels in system. | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 313 |  * @log_chans: Room for all possible logical channels in system. | 
 | 314 |  * @lookup_log_chans: Used to map interrupt number to logical channel. Points | 
 | 315 |  * to log_chans entries. | 
 | 316 |  * @lookup_phy_chans: Used to map interrupt number to physical channel. Points | 
 | 317 |  * to phy_chans entries. | 
 | 318 |  * @plat_data: Pointer to provided platform_data which is the driver | 
 | 319 |  * configuration. | 
| Narayanan G | 28c7a19 | 2011-11-22 13:56:55 +0530 | [diff] [blame] | 320 |  * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla. | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 321 |  * @phy_res: Vector containing all physical channels. | 
 | 322 |  * @lcla_pool: lcla pool settings and data. | 
 | 323 |  * @lcpa_base: The virtual mapped address of LCPA. | 
 | 324 |  * @phy_lcpa: The physical address of the LCPA. | 
 | 325 |  * @lcpa_size: The size of the LCPA area. | 
| Jonas Aaberg | c675b1b | 2010-06-20 21:25:08 +0000 | [diff] [blame] | 326 |  * @desc_slab: cache for descriptors. | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 327 |  * @reg_val_backup: Here the values of some hardware registers are stored | 
 | 328 |  * before the DMA is powered off. They are restored when the power is back on. | 
| 329 |  * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and | 
 | 330 |  * later. | 
 | 331 |  * @reg_val_backup_chan: Backup data for standard channel parameter registers. | 
 | 332 |  * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. | 
 | 333 |  * @initialized: true if the dma has been initialized | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 334 |  */ | 
 | 335 | struct d40_base { | 
 | 336 | 	spinlock_t			 interrupt_lock; | 
 | 337 | 	spinlock_t			 execmd_lock; | 
 | 338 | 	struct device			 *dev; | 
 | 339 | 	void __iomem			 *virtbase; | 
| Linus Walleij | f418559 | 2010-06-22 18:06:42 -0700 | [diff] [blame] | 340 | 	u8				  rev:4; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 341 | 	struct clk			 *clk; | 
 | 342 | 	phys_addr_t			  phy_start; | 
 | 343 | 	resource_size_t			  phy_size; | 
 | 344 | 	int				  irq; | 
 | 345 | 	int				  num_phy_chans; | 
 | 346 | 	int				  num_log_chans; | 
 | 347 | 	struct dma_device		  dma_both; | 
 | 348 | 	struct dma_device		  dma_slave; | 
 | 349 | 	struct dma_device		  dma_memcpy; | 
 | 350 | 	struct d40_chan			 *phy_chans; | 
 | 351 | 	struct d40_chan			 *log_chans; | 
 | 352 | 	struct d40_chan			**lookup_log_chans; | 
 | 353 | 	struct d40_chan			**lookup_phy_chans; | 
 | 354 | 	struct stedma40_platform_data	 *plat_data; | 
| Narayanan G | 28c7a19 | 2011-11-22 13:56:55 +0530 | [diff] [blame] | 355 | 	struct regulator		 *lcpa_regulator; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 356 | 	/* Physical half channels */ | 
 | 357 | 	struct d40_phy_res		 *phy_res; | 
 | 358 | 	struct d40_lcla_pool		  lcla_pool; | 
 | 359 | 	void				 *lcpa_base; | 
 | 360 | 	dma_addr_t			  phy_lcpa; | 
 | 361 | 	resource_size_t			  lcpa_size; | 
| Jonas Aaberg | c675b1b | 2010-06-20 21:25:08 +0000 | [diff] [blame] | 362 | 	struct kmem_cache		 *desc_slab; | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 363 | 	u32				  reg_val_backup[BACKUP_REGS_SZ]; | 
 | 364 | 	u32				  reg_val_backup_v3[BACKUP_REGS_SZ_V3]; | 
 | 365 | 	u32				 *reg_val_backup_chan; | 
 | 366 | 	u16				  gcc_pwr_off_mask; | 
 | 367 | 	bool				  initialized; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 368 | }; | 
 | 369 |  | 
 | 370 | /** | 
 | 371 |  * struct d40_interrupt_lookup - lookup table for interrupt handler | 
 | 372 |  * | 
 | 373 |  * @src: Interrupt mask register. | 
 | 374 |  * @clr: Interrupt clear register. | 
 | 375 |  * @is_error: true if this is an error interrupt. | 
| 376 |  * @offset: start delta in the lookup_log_chans in d40_base. If equal to | 
 | 377 |  * D40_PHY_CHAN, the lookup_phy_chans shall be used instead. | 
 | 378 |  */ | 
 | 379 | struct d40_interrupt_lookup { | 
 | 380 | 	u32 src; | 
 | 381 | 	u32 clr; | 
 | 382 | 	bool is_error; | 
 | 383 | 	int offset; | 
 | 384 | }; | 
 | 385 |  | 
 | 386 | /** | 
 | 387 |  * struct d40_reg_val - simple lookup struct | 
 | 388 |  * | 
 | 389 |  * @reg: The register. | 
 | 390 |  * @val: The value that belongs to the register in reg. | 
 | 391 |  */ | 
 | 392 | struct d40_reg_val { | 
 | 393 | 	unsigned int reg; | 
 | 394 | 	unsigned int val; | 
 | 395 | }; | 
 | 396 |  | 
| Rabin Vincent | 262d291 | 2011-01-25 11:18:05 +0100 | [diff] [blame] | 397 | static struct device *chan2dev(struct d40_chan *d40c) | 
 | 398 | { | 
 | 399 | 	return &d40c->chan.dev->device; | 
 | 400 | } | 
 | 401 |  | 
| Rabin Vincent | 724a857 | 2011-01-25 11:18:08 +0100 | [diff] [blame] | 402 | static bool chan_is_physical(struct d40_chan *chan) | 
 | 403 | { | 
 | 404 | 	return chan->log_num == D40_PHY_CHAN; | 
 | 405 | } | 
 | 406 |  | 
 | 407 | static bool chan_is_logical(struct d40_chan *chan) | 
 | 408 | { | 
 | 409 | 	return !chan_is_physical(chan); | 
 | 410 | } | 
 | 411 |  | 
| Rabin Vincent | 8ca8468 | 2011-01-25 11:18:07 +0100 | [diff] [blame] | 412 | static void __iomem *chan_base(struct d40_chan *chan) | 
 | 413 | { | 
 | 414 | 	return chan->base->virtbase + D40_DREG_PCBASE + | 
 | 415 | 	       chan->phy_chan->num * D40_DREG_PCDELTA; | 
 | 416 | } | 
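/*
 * Illustrative usage (added note, not from the original source): the
 * per-channel register block returned by chan_base() is what e.g.
 * d40_phy_lli_load() writes into:
 *
 *	writel(lli_src->reg_cfg, chan_base(chan) + D40_CHAN_REG_SSCFG);
 */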
 | 417 |  | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 418 | #define d40_err(dev, format, arg...)		\ | 
 | 419 | 	dev_err(dev, "[%s] " format, __func__, ## arg) | 
 | 420 |  | 
 | 421 | #define chan_err(d40c, format, arg...)		\ | 
 | 422 | 	d40_err(chan2dev(d40c), format, ## arg) | 
 | 423 |  | 
| Rabin Vincent | b00f938 | 2011-01-25 11:18:15 +0100 | [diff] [blame] | 424 | static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, | 
| Rabin Vincent | dbd8878 | 2011-01-25 11:18:19 +0100 | [diff] [blame] | 425 | 			      int lli_len) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 426 | { | 
| Rabin Vincent | dbd8878 | 2011-01-25 11:18:19 +0100 | [diff] [blame] | 427 | 	bool is_log = chan_is_logical(d40c); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 428 | 	u32 align; | 
 | 429 | 	void *base; | 
 | 430 |  | 
 | 431 | 	if (is_log) | 
 | 432 | 		align = sizeof(struct d40_log_lli); | 
 | 433 | 	else | 
 | 434 | 		align = sizeof(struct d40_phy_lli); | 
 | 435 |  | 
 | 436 | 	if (lli_len == 1) { | 
 | 437 | 		base = d40d->lli_pool.pre_alloc_lli; | 
 | 438 | 		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); | 
 | 439 | 		d40d->lli_pool.base = NULL; | 
 | 440 | 	} else { | 
| Rabin Vincent | 594ece4 | 2011-01-25 11:18:12 +0100 | [diff] [blame] | 441 | 		d40d->lli_pool.size = lli_len * 2 * align; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 442 |  | 
 | 443 | 		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); | 
 | 444 | 		d40d->lli_pool.base = base; | 
 | 445 |  | 
 | 446 | 		if (d40d->lli_pool.base == NULL) | 
 | 447 | 			return -ENOMEM; | 
 | 448 | 	} | 
 | 449 |  | 
 | 450 | 	if (is_log) { | 
| Rabin Vincent | d924aba | 2011-01-25 11:18:16 +0100 | [diff] [blame] | 451 | 		d40d->lli_log.src = PTR_ALIGN(base, align); | 
| Rabin Vincent | 594ece4 | 2011-01-25 11:18:12 +0100 | [diff] [blame] | 452 | 		d40d->lli_log.dst = d40d->lli_log.src + lli_len; | 
| Rabin Vincent | b00f938 | 2011-01-25 11:18:15 +0100 | [diff] [blame] | 453 |  | 
 | 454 | 		d40d->lli_pool.dma_addr = 0; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 455 | 	} else { | 
| Rabin Vincent | d924aba | 2011-01-25 11:18:16 +0100 | [diff] [blame] | 456 | 		d40d->lli_phy.src = PTR_ALIGN(base, align); | 
| Rabin Vincent | 594ece4 | 2011-01-25 11:18:12 +0100 | [diff] [blame] | 457 | 		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; | 
| Rabin Vincent | b00f938 | 2011-01-25 11:18:15 +0100 | [diff] [blame] | 458 |  | 
 | 459 | 		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, | 
 | 460 | 							 d40d->lli_phy.src, | 
 | 461 | 							 d40d->lli_pool.size, | 
 | 462 | 							 DMA_TO_DEVICE); | 
 | 463 |  | 
 | 464 | 		if (dma_mapping_error(d40c->base->dev, | 
 | 465 | 				      d40d->lli_pool.dma_addr)) { | 
 | 466 | 			kfree(d40d->lli_pool.base); | 
 | 467 | 			d40d->lli_pool.base = NULL; | 
 | 468 | 			d40d->lli_pool.dma_addr = 0; | 
 | 469 | 			return -ENOMEM; | 
 | 470 | 		} | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 471 | 	} | 
 | 472 |  | 
 | 473 | 	return 0; | 
 | 474 | } | 
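/*
 * Summary note (added for clarity): for the common one-src/one-dst case the
 * LLIs are placed in the descriptor's pre_alloc_lli area; longer transfers
 * get a kmalloc'd area instead. For physical channels the LLI area is also
 * DMA-mapped (DMA_TO_DEVICE) so the controller can fetch the linked items.
 */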
 | 475 |  | 
| Rabin Vincent | b00f938 | 2011-01-25 11:18:15 +0100 | [diff] [blame] | 476 | static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 477 | { | 
| Rabin Vincent | b00f938 | 2011-01-25 11:18:15 +0100 | [diff] [blame] | 478 | 	if (d40d->lli_pool.dma_addr) | 
 | 479 | 		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr, | 
 | 480 | 				 d40d->lli_pool.size, DMA_TO_DEVICE); | 
 | 481 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 482 | 	kfree(d40d->lli_pool.base); | 
 | 483 | 	d40d->lli_pool.base = NULL; | 
 | 484 | 	d40d->lli_pool.size = 0; | 
 | 485 | 	d40d->lli_log.src = NULL; | 
 | 486 | 	d40d->lli_log.dst = NULL; | 
 | 487 | 	d40d->lli_phy.src = NULL; | 
 | 488 | 	d40d->lli_phy.dst = NULL; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 489 | } | 
 | 490 |  | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 491 | static int d40_lcla_alloc_one(struct d40_chan *d40c, | 
 | 492 | 			      struct d40_desc *d40d) | 
 | 493 | { | 
 | 494 | 	unsigned long flags; | 
 | 495 | 	int i; | 
 | 496 | 	int ret = -EINVAL; | 
 | 497 | 	int p; | 
 | 498 |  | 
 | 499 | 	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | 
 | 500 |  | 
 | 501 | 	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP; | 
 | 502 |  | 
 | 503 | 	/* | 
| 504 | 	 * Allocate both src and dst at the same time, therefore only half the | 
| 505 | 	 * links are iterated, starting at 1 since 0 is used as the end marker. | 
 | 506 | 	 */ | 
 | 507 | 	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { | 
 | 508 | 		if (!d40c->base->lcla_pool.alloc_map[p + i]) { | 
 | 509 | 			d40c->base->lcla_pool.alloc_map[p + i] = d40d; | 
 | 510 | 			d40d->lcla_alloc++; | 
 | 511 | 			ret = i; | 
 | 512 | 			break; | 
 | 513 | 		} | 
 | 514 | 	} | 
 | 515 |  | 
 | 516 | 	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | 
 | 517 |  | 
 | 518 | 	return ret; | 
 | 519 | } | 
 | 520 |  | 
 | 521 | static int d40_lcla_free_all(struct d40_chan *d40c, | 
 | 522 | 			     struct d40_desc *d40d) | 
 | 523 | { | 
 | 524 | 	unsigned long flags; | 
 | 525 | 	int i; | 
 | 526 | 	int ret = -EINVAL; | 
 | 527 |  | 
| Rabin Vincent | 724a857 | 2011-01-25 11:18:08 +0100 | [diff] [blame] | 528 | 	if (chan_is_physical(d40c)) | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 529 | 		return 0; | 
 | 530 |  | 
 | 531 | 	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | 
 | 532 |  | 
 | 533 | 	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { | 
 | 534 | 		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * | 
 | 535 | 						    D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) { | 
 | 536 | 			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * | 
 | 537 | 							D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL; | 
 | 538 | 			d40d->lcla_alloc--; | 
 | 539 | 			if (d40d->lcla_alloc == 0) { | 
 | 540 | 				ret = 0; | 
 | 541 | 				break; | 
 | 542 | 			} | 
 | 543 | 		} | 
 | 544 | 	} | 
 | 545 |  | 
 | 546 | 	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | 
 | 547 |  | 
 | 548 | 	return ret; | 
 | 549 |  | 
 | 550 | } | 
 | 551 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 552 | static void d40_desc_remove(struct d40_desc *d40d) | 
 | 553 | { | 
 | 554 | 	list_del(&d40d->node); | 
 | 555 | } | 
 | 556 |  | 
 | 557 | static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | 
 | 558 | { | 
| Rabin Vincent | a2c15fa | 2010-10-06 08:20:37 +0000 | [diff] [blame] | 559 | 	struct d40_desc *desc = NULL; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 560 |  | 
 | 561 | 	if (!list_empty(&d40c->client)) { | 
| Rabin Vincent | a2c15fa | 2010-10-06 08:20:37 +0000 | [diff] [blame] | 562 | 		struct d40_desc *d; | 
 | 563 | 		struct d40_desc *_d; | 
 | 564 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 565 | 		list_for_each_entry_safe(d, _d, &d40c->client, node) { | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 566 | 			if (async_tx_test_ack(&d->txd)) { | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 567 | 				d40_desc_remove(d); | 
| Rabin Vincent | a2c15fa | 2010-10-06 08:20:37 +0000 | [diff] [blame] | 568 | 				desc = d; | 
 | 569 | 				memset(desc, 0, sizeof(*desc)); | 
| Jonas Aaberg | c675b1b | 2010-06-20 21:25:08 +0000 | [diff] [blame] | 570 | 				break; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 571 | 			} | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 572 | 		} | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 573 | 	} | 
| Rabin Vincent | a2c15fa | 2010-10-06 08:20:37 +0000 | [diff] [blame] | 574 |  | 
 | 575 | 	if (!desc) | 
 | 576 | 		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT); | 
 | 577 |  | 
 | 578 | 	if (desc) | 
 | 579 | 		INIT_LIST_HEAD(&desc->node); | 
 | 580 |  | 
 | 581 | 	return desc; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 582 | } | 
 | 583 |  | 
 | 584 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) | 
 | 585 | { | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 586 |  | 
| Rabin Vincent | b00f938 | 2011-01-25 11:18:15 +0100 | [diff] [blame] | 587 | 	d40_pool_lli_free(d40c, d40d); | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 588 | 	d40_lcla_free_all(d40c, d40d); | 
| Jonas Aaberg | c675b1b | 2010-06-20 21:25:08 +0000 | [diff] [blame] | 589 | 	kmem_cache_free(d40c->base->desc_slab, d40d); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 590 | } | 
 | 591 |  | 
 | 592 | static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) | 
 | 593 | { | 
 | 594 | 	list_add_tail(&desc->node, &d40c->active); | 
 | 595 | } | 
 | 596 |  | 
| Rabin Vincent | 1c4b092 | 2011-01-25 11:18:24 +0100 | [diff] [blame] | 597 | static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) | 
 | 598 | { | 
 | 599 | 	struct d40_phy_lli *lli_dst = desc->lli_phy.dst; | 
 | 600 | 	struct d40_phy_lli *lli_src = desc->lli_phy.src; | 
 | 601 | 	void __iomem *base = chan_base(chan); | 
 | 602 |  | 
 | 603 | 	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG); | 
 | 604 | 	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT); | 
 | 605 | 	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR); | 
 | 606 | 	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK); | 
 | 607 |  | 
 | 608 | 	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG); | 
 | 609 | 	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT); | 
 | 610 | 	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR); | 
 | 611 | 	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); | 
 | 612 | } | 
 | 613 |  | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 614 | static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) | 
 | 615 | { | 
 | 616 | 	struct d40_lcla_pool *pool = &chan->base->lcla_pool; | 
 | 617 | 	struct d40_log_lli_bidir *lli = &desc->lli_log; | 
 | 618 | 	int lli_current = desc->lli_current; | 
 | 619 | 	int lli_len = desc->lli_len; | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 620 | 	bool cyclic = desc->cyclic; | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 621 | 	int curr_lcla = -EINVAL; | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 622 | 	int first_lcla = 0; | 
| Narayanan G | 28c7a19 | 2011-11-22 13:56:55 +0530 | [diff] [blame] | 623 | 	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla; | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 624 | 	bool linkback; | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 625 |  | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 626 | 	/* | 
| 627 | 	 * We may have partially running cyclic transfers, in case we didn't get | 
 | 628 | 	 * enough LCLA entries. | 
 | 629 | 	 */ | 
 | 630 | 	linkback = cyclic && lli_current == 0; | 
 | 631 |  | 
 | 632 | 	/* | 
 | 633 | 	 * For linkback, we need one LCLA even with only one link, because we | 
 | 634 | 	 * can't link back to the one in LCPA space | 
 | 635 | 	 */ | 
 | 636 | 	if (linkback || (lli_len - lli_current > 1)) { | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 637 | 		curr_lcla = d40_lcla_alloc_one(chan, desc); | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 638 | 		first_lcla = curr_lcla; | 
 | 639 | 	} | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 640 |  | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 641 | 	/* | 
 | 642 | 	 * For linkback, we normally load the LCPA in the loop since we need to | 
 | 643 | 	 * link it to the second LCLA and not the first.  However, if we | 
 | 644 | 	 * couldn't even get a first LCLA, then we have to run in LCPA and | 
 | 645 | 	 * reload manually. | 
 | 646 | 	 */ | 
 | 647 | 	if (!linkback || curr_lcla == -EINVAL) { | 
 | 648 | 		unsigned int flags = 0; | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 649 |  | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 650 | 		if (curr_lcla == -EINVAL) | 
 | 651 | 			flags |= LLI_TERM_INT; | 
 | 652 |  | 
 | 653 | 		d40_log_lli_lcpa_write(chan->lcpa, | 
 | 654 | 				       &lli->dst[lli_current], | 
 | 655 | 				       &lli->src[lli_current], | 
 | 656 | 				       curr_lcla, | 
 | 657 | 				       flags); | 
 | 658 | 		lli_current++; | 
 | 659 | 	} | 
| Rabin Vincent | 6045f0b | 2011-01-25 11:18:32 +0100 | [diff] [blame] | 660 |  | 
 | 661 | 	if (curr_lcla < 0) | 
 | 662 | 		goto out; | 
 | 663 |  | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 664 | 	for (; lli_current < lli_len; lli_current++) { | 
 | 665 | 		unsigned int lcla_offset = chan->phy_chan->num * 1024 + | 
 | 666 | 					   8 * curr_lcla * 2; | 
 | 667 | 		struct d40_log_lli *lcla = pool->base + lcla_offset; | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 668 | 		unsigned int flags = 0; | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 669 | 		int next_lcla; | 
 | 670 |  | 
 | 671 | 		if (lli_current + 1 < lli_len) | 
 | 672 | 			next_lcla = d40_lcla_alloc_one(chan, desc); | 
 | 673 | 		else | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 674 | 			next_lcla = linkback ? first_lcla : -EINVAL; | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 675 |  | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 676 | 		if (cyclic || next_lcla == -EINVAL) | 
 | 677 | 			flags |= LLI_TERM_INT; | 
 | 678 |  | 
 | 679 | 		if (linkback && curr_lcla == first_lcla) { | 
 | 680 | 			/* First link goes in both LCPA and LCLA */ | 
 | 681 | 			d40_log_lli_lcpa_write(chan->lcpa, | 
 | 682 | 					       &lli->dst[lli_current], | 
 | 683 | 					       &lli->src[lli_current], | 
 | 684 | 					       next_lcla, flags); | 
 | 685 | 		} | 
 | 686 |  | 
 | 687 | 		/* | 
 | 688 | 		 * One unused LCLA in the cyclic case if the very first | 
 | 689 | 		 * next_lcla fails... | 
 | 690 | 		 */ | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 691 | 		d40_log_lli_lcla_write(lcla, | 
 | 692 | 				       &lli->dst[lli_current], | 
 | 693 | 				       &lli->src[lli_current], | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 694 | 				       next_lcla, flags); | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 695 |  | 
| Narayanan G | 28c7a19 | 2011-11-22 13:56:55 +0530 | [diff] [blame] | 696 | 		/* | 
 | 697 | 		 * Cache maintenance is not needed if lcla is | 
 | 698 | 		 * mapped in esram | 
 | 699 | 		 */ | 
 | 700 | 		if (!use_esram_lcla) { | 
 | 701 | 			dma_sync_single_range_for_device(chan->base->dev, | 
 | 702 | 						pool->dma_addr, lcla_offset, | 
 | 703 | 						2 * sizeof(struct d40_log_lli), | 
 | 704 | 						DMA_TO_DEVICE); | 
 | 705 | 		} | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 706 | 		curr_lcla = next_lcla; | 
 | 707 |  | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 708 | 		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 709 | 			lli_current++; | 
 | 710 | 			break; | 
 | 711 | 		} | 
 | 712 | 	} | 
 | 713 |  | 
| Rabin Vincent | 6045f0b | 2011-01-25 11:18:32 +0100 | [diff] [blame] | 714 | out: | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 715 | 	desc->lli_current = lli_current; | 
 | 716 | } | 
 | 717 |  | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 718 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) | 
 | 719 | { | 
| Rabin Vincent | 724a857 | 2011-01-25 11:18:08 +0100 | [diff] [blame] | 720 | 	if (chan_is_physical(d40c)) { | 
| Rabin Vincent | 1c4b092 | 2011-01-25 11:18:24 +0100 | [diff] [blame] | 721 | 		d40_phy_lli_load(d40c, d40d); | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 722 | 		d40d->lli_current = d40d->lli_len; | 
| Rabin Vincent | e65889c | 2011-01-25 11:18:31 +0100 | [diff] [blame] | 723 | 	} else | 
 | 724 | 		d40_log_lli_to_lcxa(d40c, d40d); | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 725 | } | 
 | 726 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 727 | static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) | 
 | 728 | { | 
 | 729 | 	struct d40_desc *d; | 
 | 730 |  | 
 | 731 | 	if (list_empty(&d40c->active)) | 
 | 732 | 		return NULL; | 
 | 733 |  | 
 | 734 | 	d = list_first_entry(&d40c->active, | 
 | 735 | 			     struct d40_desc, | 
 | 736 | 			     node); | 
 | 737 | 	return d; | 
 | 738 | } | 
 | 739 |  | 
| Per Forlin | 7404368 | 2011-08-29 13:33:34 +0200 | [diff] [blame] | 740 | /* remove desc from current queue and add it to the pending_queue */ | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 741 | static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) | 
 | 742 | { | 
| Per Forlin | 7404368 | 2011-08-29 13:33:34 +0200 | [diff] [blame] | 743 | 	d40_desc_remove(desc); | 
 | 744 | 	desc->is_in_client_list = false; | 
| Per Forlin | a8f3067 | 2011-06-26 23:29:52 +0200 | [diff] [blame] | 745 | 	list_add_tail(&desc->node, &d40c->pending_queue); | 
 | 746 | } | 
 | 747 |  | 
 | 748 | static struct d40_desc *d40_first_pending(struct d40_chan *d40c) | 
 | 749 | { | 
 | 750 | 	struct d40_desc *d; | 
 | 751 |  | 
 | 752 | 	if (list_empty(&d40c->pending_queue)) | 
 | 753 | 		return NULL; | 
 | 754 |  | 
 | 755 | 	d = list_first_entry(&d40c->pending_queue, | 
 | 756 | 			     struct d40_desc, | 
 | 757 | 			     node); | 
 | 758 | 	return d; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 759 | } | 
 | 760 |  | 
 | 761 | static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | 
 | 762 | { | 
 | 763 | 	struct d40_desc *d; | 
 | 764 |  | 
 | 765 | 	if (list_empty(&d40c->queue)) | 
 | 766 | 		return NULL; | 
 | 767 |  | 
 | 768 | 	d = list_first_entry(&d40c->queue, | 
 | 769 | 			     struct d40_desc, | 
 | 770 | 			     node); | 
 | 771 | 	return d; | 
 | 772 | } | 
 | 773 |  | 
| Per Forlin | d49278e | 2010-12-20 18:31:38 +0100 | [diff] [blame] | 774 | static int d40_psize_2_burst_size(bool is_log, int psize) | 
 | 775 | { | 
 | 776 | 	if (is_log) { | 
 | 777 | 		if (psize == STEDMA40_PSIZE_LOG_1) | 
 | 778 | 			return 1; | 
 | 779 | 	} else { | 
 | 780 | 		if (psize == STEDMA40_PSIZE_PHY_1) | 
 | 781 | 			return 1; | 
 | 782 | 	} | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 783 |  | 
| Per Forlin | d49278e | 2010-12-20 18:31:38 +0100 | [diff] [blame] | 784 | 	return 2 << psize; | 
 | 785 | } | 
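/*
 * Illustrative example (assuming the psize encodings from
 * <linux/platform_data/dma-ste-dma40.h> are plain numbers): apart from the
 * *_PSIZE_*_1 encodings, which mean a single-element burst, the burst size
 * is 2 << psize, so an encoding of 2 corresponds to an 8-element burst.
 */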
 | 786 |  | 
 | 787 | /* | 
| 788 |  * The dma only supports transmitting packets up to | 
 | 789 |  * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of | 
 | 790 |  * dma elements required to send the entire sg list | 
 | 791 |  */ | 
 | 792 | static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2) | 
 | 793 | { | 
 | 794 | 	int dmalen; | 
 | 795 | 	u32 max_w = max(data_width1, data_width2); | 
 | 796 | 	u32 min_w = min(data_width1, data_width2); | 
 | 797 | 	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); | 
 | 798 |  | 
 | 799 | 	if (seg_max > STEDMA40_MAX_SEG_SIZE) | 
 | 800 | 		seg_max -= (1 << max_w); | 
 | 801 |  | 
 | 802 | 	if (!IS_ALIGNED(size, 1 << max_w)) | 
 | 803 | 		return -EINVAL; | 
 | 804 |  | 
 | 805 | 	if (size <= seg_max) | 
 | 806 | 		dmalen = 1; | 
 | 807 | 	else { | 
 | 808 | 		dmalen = size / seg_max; | 
 | 809 | 		if (dmalen * seg_max < size) | 
 | 810 | 			dmalen++; | 
 | 811 | 	} | 
 | 812 | 	return dmalen; | 
 | 813 | } | 
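/*
 * Worked example (illustrative; assumes STEDMA40_MAX_SEG_SIZE is the 16-bit
 * element-count limit 0xFFFF and that the data widths are log2 of the
 * element size in bytes): with data_width1 == data_width2 == 2 (32-bit
 * elements), seg_max becomes (0xFFFF << 2) - 4 = 0x3FFF8 bytes, size must be
 * a multiple of 4, and the final lines compute dmalen as the equivalent of
 * DIV_ROUND_UP(size, seg_max).
 */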
 | 814 |  | 
 | 815 | static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, | 
 | 816 | 			   u32 data_width1, u32 data_width2) | 
 | 817 | { | 
 | 818 | 	struct scatterlist *sg; | 
 | 819 | 	int i; | 
 | 820 | 	int len = 0; | 
 | 821 | 	int ret; | 
 | 822 |  | 
 | 823 | 	for_each_sg(sgl, sg, sg_len, i) { | 
 | 824 | 		ret = d40_size_2_dmalen(sg_dma_len(sg), | 
 | 825 | 					data_width1, data_width2); | 
 | 826 | 		if (ret < 0) | 
 | 827 | 			return ret; | 
 | 828 | 		len += ret; | 
 | 829 | 	} | 
 | 830 | 	return len; | 
 | 831 | } | 
 | 832 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 833 |  | 
 | 834 | #ifdef CONFIG_PM | 
 | 835 | static void dma40_backup(void __iomem *baseaddr, u32 *backup, | 
 | 836 | 			 u32 *regaddr, int num, bool save) | 
 | 837 | { | 
 | 838 | 	int i; | 
 | 839 |  | 
 | 840 | 	for (i = 0; i < num; i++) { | 
 | 841 | 		void __iomem *addr = baseaddr + regaddr[i]; | 
 | 842 |  | 
 | 843 | 		if (save) | 
 | 844 | 			backup[i] = readl_relaxed(addr); | 
 | 845 | 		else | 
 | 846 | 			writel_relaxed(backup[i], addr); | 
 | 847 | 	} | 
 | 848 | } | 
 | 849 |  | 
 | 850 | static void d40_save_restore_registers(struct d40_base *base, bool save) | 
 | 851 | { | 
 | 852 | 	int i; | 
 | 853 |  | 
 | 854 | 	/* Save/Restore channel specific registers */ | 
 | 855 | 	for (i = 0; i < base->num_phy_chans; i++) { | 
 | 856 | 		void __iomem *addr; | 
 | 857 | 		int idx; | 
 | 858 |  | 
 | 859 | 		if (base->phy_res[i].reserved) | 
 | 860 | 			continue; | 
 | 861 |  | 
 | 862 | 		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; | 
 | 863 | 		idx = i * ARRAY_SIZE(d40_backup_regs_chan); | 
 | 864 |  | 
 | 865 | 		dma40_backup(addr, &base->reg_val_backup_chan[idx], | 
 | 866 | 			     d40_backup_regs_chan, | 
 | 867 | 			     ARRAY_SIZE(d40_backup_regs_chan), | 
 | 868 | 			     save); | 
 | 869 | 	} | 
 | 870 |  | 
 | 871 | 	/* Save/Restore global registers */ | 
 | 872 | 	dma40_backup(base->virtbase, base->reg_val_backup, | 
 | 873 | 		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs), | 
 | 874 | 		     save); | 
 | 875 |  | 
 | 876 | 	/* Save/Restore registers only existing on dma40 v3 and later */ | 
 | 877 | 	if (base->rev >= 3) | 
 | 878 | 		dma40_backup(base->virtbase, base->reg_val_backup_v3, | 
 | 879 | 			     d40_backup_regs_v3, | 
 | 880 | 			     ARRAY_SIZE(d40_backup_regs_v3), | 
 | 881 | 			     save); | 
 | 882 | } | 
 | 883 | #else | 
 | 884 | static void d40_save_restore_registers(struct d40_base *base, bool save) | 
 | 885 | { | 
 | 886 | } | 
 | 887 | #endif | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 888 |  | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 889 | static int __d40_execute_command_phy(struct d40_chan *d40c, | 
 | 890 | 				     enum d40_command command) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 891 | { | 
| Jonas Aaberg | 767a967 | 2010-08-09 12:08:34 +0000 | [diff] [blame] | 892 | 	u32 status; | 
 | 893 | 	int i; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 894 | 	void __iomem *active_reg; | 
 | 895 | 	int ret = 0; | 
 | 896 | 	unsigned long flags; | 
| Jonas Aaberg | 1d392a7 | 2010-06-20 21:26:01 +0000 | [diff] [blame] | 897 | 	u32 wmask; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 898 |  | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 899 | 	if (command == D40_DMA_STOP) { | 
 | 900 | 		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ); | 
 | 901 | 		if (ret) | 
 | 902 | 			return ret; | 
 | 903 | 	} | 
 | 904 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 905 | 	spin_lock_irqsave(&d40c->base->execmd_lock, flags); | 
 | 906 |  | 
 | 907 | 	if (d40c->phy_chan->num % 2 == 0) | 
 | 908 | 		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | 
 | 909 | 	else | 
 | 910 | 		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; | 
 | 911 |  | 
 | 912 | 	if (command == D40_DMA_SUSPEND_REQ) { | 
 | 913 | 		status = (readl(active_reg) & | 
 | 914 | 			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | 
 | 915 | 			D40_CHAN_POS(d40c->phy_chan->num); | 
 | 916 |  | 
 | 917 | 		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) | 
 | 918 | 			goto done; | 
 | 919 | 	} | 
 | 920 |  | 
| Jonas Aaberg | 1d392a7 | 2010-06-20 21:26:01 +0000 | [diff] [blame] | 921 | 	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num)); | 
 | 922 | 	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)), | 
 | 923 | 	       active_reg); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 924 |  | 
 | 925 | 	if (command == D40_DMA_SUSPEND_REQ) { | 
 | 926 |  | 
 | 927 | 		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) { | 
 | 928 | 			status = (readl(active_reg) & | 
 | 929 | 				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | 
 | 930 | 				D40_CHAN_POS(d40c->phy_chan->num); | 
 | 931 |  | 
 | 932 | 			cpu_relax(); | 
 | 933 | 			/* | 
 | 934 | 			 * Reduce the number of bus accesses while | 
 | 935 | 			 * waiting for the DMA to suspend. | 
 | 936 | 			 */ | 
 | 937 | 			udelay(3); | 
 | 938 |  | 
 | 939 | 			if (status == D40_DMA_STOP || | 
 | 940 | 			    status == D40_DMA_SUSPENDED) | 
 | 941 | 				break; | 
 | 942 | 		} | 
 | 943 |  | 
 | 944 | 		if (i == D40_SUSPEND_MAX_IT) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 945 | 			chan_err(d40c, | 
 | 946 | 				"unable to suspend the chl %d (log: %d) status %x\n", | 
 | 947 | 				d40c->phy_chan->num, d40c->log_num, | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 948 | 				status); | 
 | 949 | 			dump_stack(); | 
 | 950 | 			ret = -EBUSY; | 
 | 951 | 		} | 
 | 952 |  | 
 | 953 | 	} | 
 | 954 | done: | 
 | 955 | 	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags); | 
 | 956 | 	return ret; | 
 | 957 | } | 
 | 958 |  | 
 | 959 | static void d40_term_all(struct d40_chan *d40c) | 
 | 960 | { | 
 | 961 | 	struct d40_desc *d40d; | 
| Per Forlin | 7404368 | 2011-08-29 13:33:34 +0200 | [diff] [blame] | 962 | 	struct d40_desc *_d; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 963 |  | 
 | 964 | 	/* Release active descriptors */ | 
 | 965 | 	while ((d40d = d40_first_active_get(d40c))) { | 
 | 966 | 		d40_desc_remove(d40d); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 967 | 		d40_desc_free(d40c, d40d); | 
 | 968 | 	} | 
 | 969 |  | 
 | 970 | 	/* Release queued descriptors waiting for transfer */ | 
 | 971 | 	while ((d40d = d40_first_queued(d40c))) { | 
 | 972 | 		d40_desc_remove(d40d); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 973 | 		d40_desc_free(d40c, d40d); | 
 | 974 | 	} | 
 | 975 |  | 
| Per Forlin | a8f3067 | 2011-06-26 23:29:52 +0200 | [diff] [blame] | 976 | 	/* Release pending descriptors */ | 
 | 977 | 	while ((d40d = d40_first_pending(d40c))) { | 
 | 978 | 		d40_desc_remove(d40d); | 
 | 979 | 		d40_desc_free(d40c, d40d); | 
 | 980 | 	} | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 981 |  | 
| Per Forlin | 7404368 | 2011-08-29 13:33:34 +0200 | [diff] [blame] | 982 | 	/* Release client owned descriptors */ | 
 | 983 | 	if (!list_empty(&d40c->client)) | 
 | 984 | 		list_for_each_entry_safe(d40d, _d, &d40c->client, node) { | 
 | 985 | 			d40_desc_remove(d40d); | 
 | 986 | 			d40_desc_free(d40c, d40d); | 
 | 987 | 		} | 
 | 988 |  | 
| Per Forlin | 82babbb36 | 2011-08-29 13:33:35 +0200 | [diff] [blame] | 989 | 	/* Release descriptors in prepare queue */ | 
 | 990 | 	if (!list_empty(&d40c->prepare_queue)) | 
 | 991 | 		list_for_each_entry_safe(d40d, _d, | 
 | 992 | 					 &d40c->prepare_queue, node) { | 
 | 993 | 			d40_desc_remove(d40d); | 
 | 994 | 			d40_desc_free(d40c, d40d); | 
 | 995 | 		} | 
| Per Forlin | 7404368 | 2011-08-29 13:33:34 +0200 | [diff] [blame] | 996 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 997 | 	d40c->pending_tx = 0; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 998 | } | 
 | 999 |  | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1000 | static void __d40_config_set_event(struct d40_chan *d40c, | 
 | 1001 | 				   enum d40_events event_type, u32 event, | 
 | 1002 | 				   int reg) | 
| Rabin Vincent | 262d291 | 2011-01-25 11:18:05 +0100 | [diff] [blame] | 1003 | { | 
| Rabin Vincent | 8ca8468 | 2011-01-25 11:18:07 +0100 | [diff] [blame] | 1004 | 	void __iomem *addr = chan_base(d40c) + reg; | 
| Rabin Vincent | 262d291 | 2011-01-25 11:18:05 +0100 | [diff] [blame] | 1005 | 	int tries; | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1006 | 	u32 status; | 
| Rabin Vincent | 262d291 | 2011-01-25 11:18:05 +0100 | [diff] [blame] | 1007 |  | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1008 | 	switch (event_type) { | 
 | 1009 |  | 
 | 1010 | 	case D40_DEACTIVATE_EVENTLINE: | 
 | 1011 |  | 
| Rabin Vincent | 262d291 | 2011-01-25 11:18:05 +0100 | [diff] [blame] | 1012 | 		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) | 
 | 1013 | 		       | ~D40_EVENTLINE_MASK(event), addr); | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1014 | 		break; | 
| Rabin Vincent | 262d291 | 2011-01-25 11:18:05 +0100 | [diff] [blame] | 1015 |  | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1016 | 	case D40_SUSPEND_REQ_EVENTLINE: | 
 | 1017 | 		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> | 
 | 1018 | 			  D40_EVENTLINE_POS(event); | 
 | 1019 |  | 
 | 1020 | 		if (status == D40_DEACTIVATE_EVENTLINE || | 
 | 1021 | 		    status == D40_SUSPEND_REQ_EVENTLINE) | 
 | 1022 | 			break; | 
 | 1023 |  | 
 | 1024 | 		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event)) | 
 | 1025 | 		       | ~D40_EVENTLINE_MASK(event), addr); | 
 | 1026 |  | 
 | 1027 | 		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) { | 
 | 1028 |  | 
 | 1029 | 			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> | 
 | 1030 | 				  D40_EVENTLINE_POS(event); | 
 | 1031 |  | 
 | 1032 | 			cpu_relax(); | 
 | 1033 | 			/* | 
 | 1034 | 			 * Reduce the number of bus accesses while | 
 | 1035 | 			 * waiting for the DMA to suspend. | 
 | 1036 | 			 */ | 
 | 1037 | 			udelay(3); | 
 | 1038 |  | 
 | 1039 | 			if (status == D40_DEACTIVATE_EVENTLINE) | 
 | 1040 | 				break; | 
 | 1041 | 		} | 
 | 1042 |  | 
 | 1043 | 		if (tries == D40_SUSPEND_MAX_IT) { | 
 | 1044 | 			chan_err(d40c, | 
| 1045 | 				"unable to stop the event_line chl %d (log: %d) " | 
 | 1046 | 				"status %x\n", d40c->phy_chan->num, | 
 | 1047 | 				 d40c->log_num, status); | 
 | 1048 | 		} | 
 | 1049 | 		break; | 
 | 1050 |  | 
 | 1051 | 	case D40_ACTIVATE_EVENTLINE: | 
| Rabin Vincent | 262d291 | 2011-01-25 11:18:05 +0100 | [diff] [blame] | 1052 | 	/* | 
 | 1053 | 	 * The hardware sometimes doesn't register the enable when src and dst | 
 | 1054 | 	 * event lines are active on the same logical channel.  Retry to ensure | 
 | 1055 | 	 * it does.  Usually only one retry is sufficient. | 
 | 1056 | 	 */ | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1057 | 		tries = 100; | 
 | 1058 | 		while (--tries) { | 
 | 1059 | 			writel((D40_ACTIVATE_EVENTLINE << | 
 | 1060 | 				D40_EVENTLINE_POS(event)) | | 
 | 1061 | 				~D40_EVENTLINE_MASK(event), addr); | 
| Rabin Vincent | 262d291 | 2011-01-25 11:18:05 +0100 | [diff] [blame] | 1062 |  | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1063 | 			if (readl(addr) & D40_EVENTLINE_MASK(event)) | 
 | 1064 | 				break; | 
 | 1065 | 		} | 
 | 1066 |  | 
 | 1067 | 		if (tries != 99) | 
 | 1068 | 			dev_dbg(chan2dev(d40c), | 
 | 1069 | 				"[%s] workaround enable S%cLNK (%d tries)\n", | 
 | 1070 | 				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', | 
 | 1071 | 				100 - tries); | 
 | 1072 |  | 
 | 1073 | 		WARN_ON(!tries); | 
 | 1074 | 		break; | 
 | 1075 |  | 
 | 1076 | 	case D40_ROUND_EVENTLINE: | 
 | 1077 | 		BUG(); | 
 | 1078 | 		break; | 
 | 1079 |  | 
| Rabin Vincent | 262d291 | 2011-01-25 11:18:05 +0100 | [diff] [blame] | 1080 | 	} | 
| Rabin Vincent | 262d291 | 2011-01-25 11:18:05 +0100 | [diff] [blame] | 1081 | } | 
 | 1082 |  | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1083 | static void d40_config_set_event(struct d40_chan *d40c, | 
 | 1084 | 				 enum d40_events event_type) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1085 | { | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1086 | 	/* Enable or disable the event line connected to the device (or memcpy) */ | 
 | 1087 | 	if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) || | 
 | 1088 | 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { | 
 | 1089 | 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 
 | 1090 |  | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1091 | 		__d40_config_set_event(d40c, event_type, event, | 
| Rabin Vincent | 262d291 | 2011-01-25 11:18:05 +0100 | [diff] [blame] | 1092 | 				       D40_CHAN_REG_SSLNK); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1093 | 	} | 
| Rabin Vincent | 262d291 | 2011-01-25 11:18:05 +0100 | [diff] [blame] | 1094 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1095 | 	if (d40c->dma_cfg.dir !=  STEDMA40_PERIPH_TO_MEM) { | 
 | 1096 | 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 
 | 1097 |  | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1098 | 		__d40_config_set_event(d40c, event_type, event, | 
| Rabin Vincent | 262d291 | 2011-01-25 11:18:05 +0100 | [diff] [blame] | 1099 | 				       D40_CHAN_REG_SDLNK); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1100 | 	} | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1101 | } | 
 | 1102 |  | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 1103 | static u32 d40_chan_has_events(struct d40_chan *d40c) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1104 | { | 
| Rabin Vincent | 8ca8468 | 2011-01-25 11:18:07 +0100 | [diff] [blame] | 1105 | 	void __iomem *chanbase = chan_base(d40c); | 
| Jonas Aaberg | be8cb7d | 2010-08-09 12:07:44 +0000 | [diff] [blame] | 1106 | 	u32 val; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1107 |  | 
| Rabin Vincent | 8ca8468 | 2011-01-25 11:18:07 +0100 | [diff] [blame] | 1108 | 	val = readl(chanbase + D40_CHAN_REG_SSLNK); | 
 | 1109 | 	val |= readl(chanbase + D40_CHAN_REG_SDLNK); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1110 |  | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 1111 | 	return val; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1112 | } | 
 | 1113 |  | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1114 | static int | 
 | 1115 | __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command) | 
 | 1116 | { | 
 | 1117 | 	unsigned long flags; | 
 | 1118 | 	int ret = 0; | 
 | 1119 | 	u32 active_status; | 
 | 1120 | 	void __iomem *active_reg; | 
 | 1121 |  | 
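 |  | 	/* Even-numbered physical channels are reported in ACTIVE, odd ones in ACTIVO */ | 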
 | 1122 | 	if (d40c->phy_chan->num % 2 == 0) | 
 | 1123 | 		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | 
 | 1124 | 	else | 
 | 1125 | 		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; | 
 | 1126 |  | 
 | 1127 |  | 
 | 1128 | 	spin_lock_irqsave(&d40c->phy_chan->lock, flags); | 
 | 1129 |  | 
 | 1130 | 	switch (command) { | 
 | 1131 | 	case D40_DMA_STOP: | 
 | 1132 | 	case D40_DMA_SUSPEND_REQ: | 
 | 1133 |  | 
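 |  | 		/* | 
 |  | 		 * Request a suspend of the event line if the physical channel | 
 |  | 		 * is still running, otherwise just deactivate the event line. | 
 |  | 		 * The physical channel itself is only stopped when no event | 
 |  | 		 * lines remain linked and a full stop was requested. | 
 |  | 		 */ | 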
 | 1134 | 		active_status = (readl(active_reg) & | 
 | 1135 | 				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | 
 | 1136 | 				 D40_CHAN_POS(d40c->phy_chan->num); | 
 | 1137 |  | 
 | 1138 | 		if (active_status == D40_DMA_RUN) | 
 | 1139 | 			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE); | 
 | 1140 | 		else | 
 | 1141 | 			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE); | 
 | 1142 |  | 
 | 1143 | 		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP)) | 
 | 1144 | 			ret = __d40_execute_command_phy(d40c, command); | 
 | 1145 |  | 
 | 1146 | 		break; | 
 | 1147 |  | 
 | 1148 | 	case D40_DMA_RUN: | 
 | 1149 |  | 
 | 1150 | 		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE); | 
 | 1151 | 		ret = __d40_execute_command_phy(d40c, command); | 
 | 1152 | 		break; | 
 | 1153 |  | 
 | 1154 | 	case D40_DMA_SUSPENDED: | 
 | 1155 | 		BUG(); | 
 | 1156 | 		break; | 
 | 1157 | 	} | 
 | 1158 |  | 
 | 1159 | 	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); | 
 | 1160 | 	return ret; | 
 | 1161 | } | 
 | 1162 |  | 
 | 1163 | static int d40_channel_execute_command(struct d40_chan *d40c, | 
 | 1164 | 				       enum d40_command command) | 
 | 1165 | { | 
 | 1166 | 	if (chan_is_logical(d40c)) | 
 | 1167 | 		return __d40_execute_command_log(d40c, command); | 
 | 1168 | 	else | 
 | 1169 | 		return __d40_execute_command_phy(d40c, command); | 
 | 1170 | } | 
 | 1171 |  | 
| Rabin Vincent | 20a5b6d | 2010-10-12 13:00:52 +0000 | [diff] [blame] | 1172 | static u32 d40_get_prmo(struct d40_chan *d40c) | 
 | 1173 | { | 
 | 1174 | 	static const unsigned int phy_map[] = { | 
 | 1175 | 		[STEDMA40_PCHAN_BASIC_MODE] | 
 | 1176 | 			= D40_DREG_PRMO_PCHAN_BASIC, | 
 | 1177 | 		[STEDMA40_PCHAN_MODULO_MODE] | 
 | 1178 | 			= D40_DREG_PRMO_PCHAN_MODULO, | 
 | 1179 | 		[STEDMA40_PCHAN_DOUBLE_DST_MODE] | 
 | 1180 | 			= D40_DREG_PRMO_PCHAN_DOUBLE_DST, | 
 | 1181 | 	}; | 
 | 1182 | 	static const unsigned int log_map[] = { | 
 | 1183 | 		[STEDMA40_LCHAN_SRC_PHY_DST_LOG] | 
 | 1184 | 			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG, | 
 | 1185 | 		[STEDMA40_LCHAN_SRC_LOG_DST_PHY] | 
 | 1186 | 			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY, | 
 | 1187 | 		[STEDMA40_LCHAN_SRC_LOG_DST_LOG] | 
 | 1188 | 			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, | 
 | 1189 | 	}; | 
 | 1190 |  | 
| Rabin Vincent | 724a857 | 2011-01-25 11:18:08 +0100 | [diff] [blame] | 1191 | 	if (chan_is_physical(d40c)) | 
| Rabin Vincent | 20a5b6d | 2010-10-12 13:00:52 +0000 | [diff] [blame] | 1192 | 		return phy_map[d40c->dma_cfg.mode_opt]; | 
 | 1193 | 	else | 
 | 1194 | 		return log_map[d40c->dma_cfg.mode_opt]; | 
 | 1195 | } | 
 | 1196 |  | 
| Jonas Aaberg | b55912c | 2010-08-09 12:08:02 +0000 | [diff] [blame] | 1197 | static void d40_config_write(struct d40_chan *d40c) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1198 | { | 
 | 1199 | 	u32 addr_base; | 
 | 1200 | 	u32 var; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1201 |  | 
 | 1202 | 	/* Odd addresses are even addresses + 4 */ | 
 | 1203 | 	addr_base = (d40c->phy_chan->num % 2) * 4; | 
 | 1204 | 	/* Setup channel mode to logical or physical */ | 
| Rabin Vincent | 724a857 | 2011-01-25 11:18:08 +0100 | [diff] [blame] | 1205 | 	var = ((u32)(chan_is_logical(d40c)) + 1) << | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1206 | 		D40_CHAN_POS(d40c->phy_chan->num); | 
 | 1207 | 	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); | 
 | 1208 |  | 
 | 1209 | 	/* Setup operational mode option register */ | 
| Rabin Vincent | 20a5b6d | 2010-10-12 13:00:52 +0000 | [diff] [blame] | 1210 | 	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1211 |  | 
 | 1212 | 	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); | 
 | 1213 |  | 
| Rabin Vincent | 724a857 | 2011-01-25 11:18:08 +0100 | [diff] [blame] | 1214 | 	if (chan_is_logical(d40c)) { | 
| Rabin Vincent | 8ca8468 | 2011-01-25 11:18:07 +0100 | [diff] [blame] | 1215 | 		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) | 
 | 1216 | 			   & D40_SREG_ELEM_LOG_LIDX_MASK; | 
 | 1217 | 		void __iomem *chanbase = chan_base(d40c); | 
 | 1218 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1219 | 		/* Set default config for CFG reg */ | 
| Rabin Vincent | 8ca8468 | 2011-01-25 11:18:07 +0100 | [diff] [blame] | 1220 | 		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG); | 
 | 1221 | 		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1222 |  | 
| Jonas Aaberg | b55912c | 2010-08-09 12:08:02 +0000 | [diff] [blame] | 1223 | 		/* Set LIDX for lcla */ | 
| Rabin Vincent | 8ca8468 | 2011-01-25 11:18:07 +0100 | [diff] [blame] | 1224 | 		writel(lidx, chanbase + D40_CHAN_REG_SSELT); | 
 | 1225 | 		writel(lidx, chanbase + D40_CHAN_REG_SDELT); | 
| Rabin Vincent | e9f3a49 | 2011-12-28 11:27:40 +0530 | [diff] [blame] | 1226 |  | 
 | 1227 | 		/* Clear LNK which will be used by d40_chan_has_events() */ | 
 | 1228 | 		writel(0, chanbase + D40_CHAN_REG_SSLNK); | 
 | 1229 | 		writel(0, chanbase + D40_CHAN_REG_SDLNK); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1230 | 	} | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1231 | } | 
 | 1232 |  | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1233 | static u32 d40_residue(struct d40_chan *d40c) | 
 | 1234 | { | 
 | 1235 | 	u32 num_elt; | 
 | 1236 |  | 
| Rabin Vincent | 724a857 | 2011-01-25 11:18:08 +0100 | [diff] [blame] | 1237 | 	if (chan_is_logical(d40c)) | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1238 | 		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) | 
 | 1239 | 			>> D40_MEM_LCSP2_ECNT_POS; | 
| Rabin Vincent | 8ca8468 | 2011-01-25 11:18:07 +0100 | [diff] [blame] | 1240 | 	else { | 
 | 1241 | 		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT); | 
 | 1242 | 		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK) | 
 | 1243 | 			  >> D40_SREG_ELEM_PHY_ECNT_POS; | 
 | 1244 | 	} | 
 | 1245 |  | 
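 |  | 	/* Convert the remaining element count to bytes using the destination data width */ | 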
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1246 | 	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); | 
 | 1247 | } | 
 | 1248 |  | 
 | 1249 | static bool d40_tx_is_linked(struct d40_chan *d40c) | 
 | 1250 | { | 
 | 1251 | 	bool is_link; | 
 | 1252 |  | 
| Rabin Vincent | 724a857 | 2011-01-25 11:18:08 +0100 | [diff] [blame] | 1253 | 	if (chan_is_logical(d40c)) | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1254 | 		is_link = readl(&d40c->lcpa->lcsp3) &  D40_MEM_LCSP3_DLOS_MASK; | 
 | 1255 | 	else | 
| Rabin Vincent | 8ca8468 | 2011-01-25 11:18:07 +0100 | [diff] [blame] | 1256 | 		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) | 
 | 1257 | 			  & D40_SREG_LNK_PHYS_LNK_MASK; | 
 | 1258 |  | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1259 | 	return is_link; | 
 | 1260 | } | 
 | 1261 |  | 
| Rabin Vincent | 86eb5fb | 2011-01-25 11:18:34 +0100 | [diff] [blame] | 1262 | static int d40_pause(struct d40_chan *d40c) | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1263 | { | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1264 | 	int res = 0; | 
 | 1265 | 	unsigned long flags; | 
 | 1266 |  | 
| Jonas Aaberg | 3ac012a | 2010-08-09 12:09:12 +0000 | [diff] [blame] | 1267 | 	if (!d40c->busy) | 
 | 1268 | 		return 0; | 
 | 1269 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 1270 | 	pm_runtime_get_sync(d40c->base->dev); | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1271 | 	spin_lock_irqsave(&d40c->lock, flags); | 
 | 1272 |  | 
 | 1273 | 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1274 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 1275 | 	pm_runtime_mark_last_busy(d40c->base->dev); | 
 | 1276 | 	pm_runtime_put_autosuspend(d40c->base->dev); | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1277 | 	spin_unlock_irqrestore(&d40c->lock, flags); | 
 | 1278 | 	return res; | 
 | 1279 | } | 
 | 1280 |  | 
| Rabin Vincent | 86eb5fb | 2011-01-25 11:18:34 +0100 | [diff] [blame] | 1281 | static int d40_resume(struct d40_chan *d40c) | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1282 | { | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1283 | 	int res = 0; | 
 | 1284 | 	unsigned long flags; | 
 | 1285 |  | 
| Jonas Aaberg | 3ac012a | 2010-08-09 12:09:12 +0000 | [diff] [blame] | 1286 | 	if (!d40c->busy) | 
 | 1287 | 		return 0; | 
 | 1288 |  | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1289 | 	spin_lock_irqsave(&d40c->lock, flags); | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 1290 | 	pm_runtime_get_sync(d40c->base->dev); | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1291 |  | 
 | 1292 | 	/* Resume the job if there are bytes left to transfer or a linked tx */ | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1293 | 	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1294 | 		res = d40_channel_execute_command(d40c, D40_DMA_RUN); | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1295 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 1296 | 	pm_runtime_mark_last_busy(d40c->base->dev); | 
 | 1297 | 	pm_runtime_put_autosuspend(d40c->base->dev); | 
| Jonas Aaberg | aa182ae | 2010-08-09 12:08:26 +0000 | [diff] [blame] | 1298 | 	spin_unlock_irqrestore(&d40c->lock, flags); | 
 | 1299 | 	return res; | 
 | 1300 | } | 
 | 1301 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1302 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | 
 | 1303 | { | 
 | 1304 | 	struct d40_chan *d40c = container_of(tx->chan, | 
 | 1305 | 					     struct d40_chan, | 
 | 1306 | 					     chan); | 
 | 1307 | 	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); | 
 | 1308 | 	unsigned long flags; | 
| Russell King - ARM Linux | 884485e | 2012-03-06 22:34:46 +0000 | [diff] [blame] | 1309 | 	dma_cookie_t cookie; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1310 |  | 
 | 1311 | 	spin_lock_irqsave(&d40c->lock, flags); | 
| Russell King - ARM Linux | 884485e | 2012-03-06 22:34:46 +0000 | [diff] [blame] | 1312 | 	cookie = dma_cookie_assign(tx); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1313 | 	d40_desc_queue(d40c, d40d); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1314 | 	spin_unlock_irqrestore(&d40c->lock, flags); | 
 | 1315 |  | 
| Russell King - ARM Linux | 884485e | 2012-03-06 22:34:46 +0000 | [diff] [blame] | 1316 | 	return cookie; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1317 | } | 
 | 1318 |  | 
 | 1319 | static int d40_start(struct d40_chan *d40c) | 
 | 1320 | { | 
| Jonas Aaberg | 0c32269 | 2010-06-20 21:25:46 +0000 | [diff] [blame] | 1321 | 	return d40_channel_execute_command(d40c, D40_DMA_RUN); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1322 | } | 
 | 1323 |  | 
 | 1324 | static struct d40_desc *d40_queue_start(struct d40_chan *d40c) | 
 | 1325 | { | 
 | 1326 | 	struct d40_desc *d40d; | 
 | 1327 | 	int err; | 
 | 1328 |  | 
 | 1329 | 	/* Start queued jobs, if any */ | 
 | 1330 | 	d40d = d40_first_queued(d40c); | 
 | 1331 |  | 
 | 1332 | 	if (d40d != NULL) { | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1333 | 		if (!d40c->busy) { | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 1334 | 			d40c->busy = true; | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1335 | 			pm_runtime_get_sync(d40c->base->dev); | 
 | 1336 | 		} | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1337 |  | 
 | 1338 | 		/* Remove from queue */ | 
 | 1339 | 		d40_desc_remove(d40d); | 
 | 1340 |  | 
 | 1341 | 		/* Add to active queue */ | 
 | 1342 | 		d40_desc_submit(d40c, d40d); | 
 | 1343 |  | 
| Rabin Vincent | 7d83a85 | 2011-01-25 11:18:06 +0100 | [diff] [blame] | 1344 | 		/* Initiate DMA job */ | 
 | 1345 | 		d40_desc_load(d40c, d40d); | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 1346 |  | 
| Rabin Vincent | 7d83a85 | 2011-01-25 11:18:06 +0100 | [diff] [blame] | 1347 | 		/* Start dma job */ | 
 | 1348 | 		err = d40_start(d40c); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1349 |  | 
| Rabin Vincent | 7d83a85 | 2011-01-25 11:18:06 +0100 | [diff] [blame] | 1350 | 		if (err) | 
 | 1351 | 			return NULL; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1352 | 	} | 
 | 1353 |  | 
 | 1354 | 	return d40d; | 
 | 1355 | } | 
 | 1356 |  | 
 | 1357 | /* called from interrupt context */ | 
 | 1358 | static void dma_tc_handle(struct d40_chan *d40c) | 
 | 1359 | { | 
 | 1360 | 	struct d40_desc *d40d; | 
 | 1361 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1362 | 	/* Get first active entry from list */ | 
 | 1363 | 	d40d = d40_first_active_get(d40c); | 
 | 1364 |  | 
 | 1365 | 	if (d40d == NULL) | 
 | 1366 | 		return; | 
 | 1367 |  | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 1368 | 	if (d40d->cyclic) { | 
 | 1369 | 		/* | 
 | 1370 | 		 * If this was a partially loaded list, we need to reload | 
 | 1371 | 		 * it, but only when the list is completed.  We need to check | 
 | 1372 | 		 * for done because the interrupt will hit for every link, and | 
 | 1373 | 		 * not just the last one. | 
 | 1374 | 		 */ | 
 | 1375 | 		if (d40d->lli_current < d40d->lli_len | 
 | 1376 | 		    && !d40_tx_is_linked(d40c) | 
 | 1377 | 		    && !d40_residue(d40c)) { | 
 | 1378 | 			d40_lcla_free_all(d40c, d40d); | 
 | 1379 | 			d40_desc_load(d40c, d40d); | 
 | 1380 | 			(void) d40_start(d40c); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1381 |  | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 1382 | 			if (d40d->lli_current == d40d->lli_len) | 
 | 1383 | 				d40d->lli_current = 0; | 
 | 1384 | 		} | 
 | 1385 | 	} else { | 
 | 1386 | 		d40_lcla_free_all(d40c, d40d); | 
 | 1387 |  | 
 | 1388 | 		if (d40d->lli_current < d40d->lli_len) { | 
 | 1389 | 			d40_desc_load(d40c, d40d); | 
 | 1390 | 			/* Start dma job */ | 
 | 1391 | 			(void) d40_start(d40c); | 
 | 1392 | 			return; | 
 | 1393 | 		} | 
 | 1394 |  | 
 | 1395 | 		if (d40_queue_start(d40c) == NULL) | 
 | 1396 | 			d40c->busy = false; | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 1397 | 		pm_runtime_mark_last_busy(d40c->base->dev); | 
 | 1398 | 		pm_runtime_put_autosuspend(d40c->base->dev); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1399 | 	} | 
 | 1400 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1401 | 	d40c->pending_tx++; | 
 | 1402 | 	tasklet_schedule(&d40c->tasklet); | 
 | 1403 |  | 
 | 1404 | } | 
 | 1405 |  | 
 | 1406 | static void dma_tasklet(unsigned long data) | 
 | 1407 | { | 
 | 1408 | 	struct d40_chan *d40c = (struct d40_chan *) data; | 
| Jonas Aaberg | 767a967 | 2010-08-09 12:08:34 +0000 | [diff] [blame] | 1409 | 	struct d40_desc *d40d; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1410 | 	unsigned long flags; | 
 | 1411 | 	dma_async_tx_callback callback; | 
 | 1412 | 	void *callback_param; | 
 | 1413 |  | 
 | 1414 | 	spin_lock_irqsave(&d40c->lock, flags); | 
 | 1415 |  | 
 | 1416 | 	/* Get first active entry from list */ | 
| Jonas Aaberg | 767a967 | 2010-08-09 12:08:34 +0000 | [diff] [blame] | 1417 | 	d40d = d40_first_active_get(d40c); | 
| Jonas Aaberg | 767a967 | 2010-08-09 12:08:34 +0000 | [diff] [blame] | 1418 | 	if (d40d == NULL) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1419 | 		goto err; | 
 | 1420 |  | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 1421 | 	if (!d40d->cyclic) | 
| Russell King - ARM Linux | f7fbce0 | 2012-03-06 22:35:07 +0000 | [diff] [blame] | 1422 | 		dma_cookie_complete(&d40d->txd); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1423 |  | 
 | 1424 | 	/* | 
 | 1425 | 	 * If terminating a channel, pending_tx is set to zero. | 
 | 1426 | 	 * This prevents any finished active jobs from returning to the client. | 
 | 1427 | 	 */ | 
 | 1428 | 	if (d40c->pending_tx == 0) { | 
 | 1429 | 		spin_unlock_irqrestore(&d40c->lock, flags); | 
 | 1430 | 		return; | 
 | 1431 | 	} | 
 | 1432 |  | 
 | 1433 | 	/* Callback to client */ | 
| Jonas Aaberg | 767a967 | 2010-08-09 12:08:34 +0000 | [diff] [blame] | 1434 | 	callback = d40d->txd.callback; | 
 | 1435 | 	callback_param = d40d->txd.callback_param; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1436 |  | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 1437 | 	if (!d40d->cyclic) { | 
 | 1438 | 		if (async_tx_test_ack(&d40d->txd)) { | 
| Jonas Aaberg | 767a967 | 2010-08-09 12:08:34 +0000 | [diff] [blame] | 1439 | 			d40_desc_remove(d40d); | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 1440 | 			d40_desc_free(d40c, d40d); | 
 | 1441 | 		} else { | 
 | 1442 | 			if (!d40d->is_in_client_list) { | 
 | 1443 | 				d40_desc_remove(d40d); | 
 | 1444 | 				d40_lcla_free_all(d40c, d40d); | 
 | 1445 | 				list_add_tail(&d40d->node, &d40c->client); | 
 | 1446 | 				d40d->is_in_client_list = true; | 
 | 1447 | 			} | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1448 | 		} | 
 | 1449 | 	} | 
 | 1450 |  | 
 | 1451 | 	d40c->pending_tx--; | 
 | 1452 |  | 
 | 1453 | 	if (d40c->pending_tx) | 
 | 1454 | 		tasklet_schedule(&d40c->tasklet); | 
 | 1455 |  | 
 | 1456 | 	spin_unlock_irqrestore(&d40c->lock, flags); | 
 | 1457 |  | 
| Jonas Aaberg | 767a967 | 2010-08-09 12:08:34 +0000 | [diff] [blame] | 1458 | 	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT)) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1459 | 		callback(callback_param); | 
 | 1460 |  | 
 | 1461 | 	return; | 
 | 1462 |  | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1463 | err: | 
 | 1464 | 	/* Rescue maneuver if receiving double interrupts */ | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1465 | 	if (d40c->pending_tx > 0) | 
 | 1466 | 		d40c->pending_tx--; | 
 | 1467 | 	spin_unlock_irqrestore(&d40c->lock, flags); | 
 | 1468 | } | 
 | 1469 |  | 
 | 1470 | static irqreturn_t d40_handle_interrupt(int irq, void *data) | 
 | 1471 | { | 
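 |  | 	/* | 
 |  | 	 * Each entry: interrupt status register, the register used to ack it, | 
 |  | 	 * whether it signals an error, and the first channel it covers | 
 |  | 	 * (D40_PHY_CHAN for the physical channel interrupts). | 
 |  | 	 */ | 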
 | 1472 | 	static const struct d40_interrupt_lookup il[] = { | 
 | 1473 | 		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0}, | 
 | 1474 | 		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32}, | 
 | 1475 | 		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64}, | 
 | 1476 | 		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96}, | 
 | 1477 | 		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0}, | 
 | 1478 | 		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32}, | 
 | 1479 | 		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64}, | 
 | 1480 | 		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96}, | 
 | 1481 | 		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN}, | 
 | 1482 | 		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN}, | 
 | 1483 | 	}; | 
 | 1484 |  | 
 | 1485 | 	int i; | 
 | 1486 | 	u32 regs[ARRAY_SIZE(il)]; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1487 | 	u32 idx; | 
 | 1488 | 	u32 row; | 
 | 1489 | 	long chan = -1; | 
 | 1490 | 	struct d40_chan *d40c; | 
 | 1491 | 	unsigned long flags; | 
 | 1492 | 	struct d40_base *base = data; | 
 | 1493 |  | 
 | 1494 | 	spin_lock_irqsave(&base->interrupt_lock, flags); | 
 | 1495 |  | 
 | 1496 | 	/* Read interrupt status of both logical and physical channels */ | 
 | 1497 | 	for (i = 0; i < ARRAY_SIZE(il); i++) | 
 | 1498 | 		regs[i] = readl(base->virtbase + il[i].src); | 
 | 1499 |  | 
 | 1500 | 	for (;;) { | 
 | 1501 |  | 
 | 1502 | 		chan = find_next_bit((unsigned long *)regs, | 
 | 1503 | 				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1); | 
 | 1504 |  | 
 | 1505 | 		/* No more set bits found? */ | 
 | 1506 | 		if (chan == BITS_PER_LONG * ARRAY_SIZE(il)) | 
 | 1507 | 			break; | 
 | 1508 |  | 
 | 1509 | 		row = chan / BITS_PER_LONG; | 
 | 1510 | 		idx = chan & (BITS_PER_LONG - 1); | 
 | 1511 |  | 
 | 1512 | 		/* ACK interrupt */ | 
| Jonas Aaberg | 1b00348 | 2010-08-09 12:07:54 +0000 | [diff] [blame] | 1513 | 		writel(1 << idx, base->virtbase + il[row].clr); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1514 |  | 
 | 1515 | 		if (il[row].offset == D40_PHY_CHAN) | 
 | 1516 | 			d40c = base->lookup_phy_chans[idx]; | 
 | 1517 | 		else | 
 | 1518 | 			d40c = base->lookup_log_chans[il[row].offset + idx]; | 
 | 1519 | 		spin_lock(&d40c->lock); | 
 | 1520 |  | 
 | 1521 | 		if (!il[row].is_error) | 
 | 1522 | 			dma_tc_handle(d40c); | 
 | 1523 | 		else | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1524 | 			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n", | 
 | 1525 | 				chan, il[row].offset, idx); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1526 |  | 
 | 1527 | 		spin_unlock(&d40c->lock); | 
 | 1528 | 	} | 
 | 1529 |  | 
 | 1530 | 	spin_unlock_irqrestore(&base->interrupt_lock, flags); | 
 | 1531 |  | 
 | 1532 | 	return IRQ_HANDLED; | 
 | 1533 | } | 
 | 1534 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1535 | static int d40_validate_conf(struct d40_chan *d40c, | 
 | 1536 | 			     struct stedma40_chan_cfg *conf) | 
 | 1537 | { | 
 | 1538 | 	int res = 0; | 
 | 1539 | 	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); | 
 | 1540 | 	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); | 
| Rabin Vincent | 38bdbf0 | 2010-10-12 13:00:51 +0000 | [diff] [blame] | 1541 | 	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1542 |  | 
| Linus Walleij | 0747c7b | 2010-08-09 12:07:36 +0000 | [diff] [blame] | 1543 | 	if (!conf->dir) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1544 | 		chan_err(d40c, "Invalid direction.\n"); | 
| Linus Walleij | 0747c7b | 2010-08-09 12:07:36 +0000 | [diff] [blame] | 1545 | 		res = -EINVAL; | 
 | 1546 | 	} | 
 | 1547 |  | 
 | 1548 | 	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY && | 
 | 1549 | 	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && | 
 | 1550 | 	    d40c->runtime_addr == 0) { | 
 | 1551 |  | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1552 | 		chan_err(d40c, "Invalid TX channel address (%d)\n", | 
 | 1553 | 			 conf->dst_dev_type); | 
| Linus Walleij | 0747c7b | 2010-08-09 12:07:36 +0000 | [diff] [blame] | 1554 | 		res = -EINVAL; | 
 | 1555 | 	} | 
 | 1556 |  | 
 | 1557 | 	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && | 
 | 1558 | 	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && | 
 | 1559 | 	    d40c->runtime_addr == 0) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1560 | 		chan_err(d40c, "Invalid RX channel address (%d)\n", | 
 | 1561 | 			conf->src_dev_type); | 
| Linus Walleij | 0747c7b | 2010-08-09 12:07:36 +0000 | [diff] [blame] | 1562 | 		res = -EINVAL; | 
 | 1563 | 	} | 
 | 1564 |  | 
 | 1565 | 	if (conf->dir == STEDMA40_MEM_TO_PERIPH && | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1566 | 	    dst_event_group == STEDMA40_DEV_DST_MEMORY) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1567 | 		chan_err(d40c, "Invalid dst\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1568 | 		res = -EINVAL; | 
 | 1569 | 	} | 
 | 1570 |  | 
| Linus Walleij | 0747c7b | 2010-08-09 12:07:36 +0000 | [diff] [blame] | 1571 | 	if (conf->dir == STEDMA40_PERIPH_TO_MEM && | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1572 | 	    src_event_group == STEDMA40_DEV_SRC_MEMORY) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1573 | 		chan_err(d40c, "Invalid src\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1574 | 		res = -EINVAL; | 
 | 1575 | 	} | 
 | 1576 |  | 
 | 1577 | 	if (src_event_group == STEDMA40_DEV_SRC_MEMORY && | 
 | 1578 | 	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1579 | 		chan_err(d40c, "No event line\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1580 | 		res = -EINVAL; | 
 | 1581 | 	} | 
 | 1582 |  | 
 | 1583 | 	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && | 
 | 1584 | 	    (src_event_group != dst_event_group)) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1585 | 		chan_err(d40c, "Invalid event group\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1586 | 		res = -EINVAL; | 
 | 1587 | 	} | 
 | 1588 |  | 
 | 1589 | 	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) { | 
 | 1590 | 		/* | 
 | 1591 | 		 * The DMAC HW supports it. It will be added to this driver | 
 | 1592 | 		 * if any dma client requires it. | 
 | 1593 | 		 */ | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1594 | 		chan_err(d40c, "periph to periph not supported\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1595 | 		res = -EINVAL; | 
 | 1596 | 	} | 
 | 1597 |  | 
| Per Forlin | d49278e | 2010-12-20 18:31:38 +0100 | [diff] [blame] | 1598 | 	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * | 
 | 1599 | 	    (1 << conf->src_info.data_width) != | 
 | 1600 | 	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) * | 
 | 1601 | 	    (1 << conf->dst_info.data_width)) { | 
 | 1602 | 		/* | 
 | 1603 | 		 * The DMAC hardware only supports | 
 | 1604 | 		 * src (burst x width) == dst (burst x width) | 
 | 1605 | 		 */ | 
 | 1606 |  | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1607 | 		chan_err(d40c, "src (burst x width) != dst (burst x width)\n"); | 
| Per Forlin | d49278e | 2010-12-20 18:31:38 +0100 | [diff] [blame] | 1608 | 		res = -EINVAL; | 
 | 1609 | 	} | 
 | 1610 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1611 | 	return res; | 
 | 1612 | } | 
 | 1613 |  | 
| Narayanan G | 5cd326f | 2011-11-30 19:20:42 +0530 | [diff] [blame] | 1614 | static bool d40_alloc_mask_set(struct d40_phy_res *phy, | 
 | 1615 | 			       bool is_src, int log_event_line, bool is_log, | 
 | 1616 | 			       bool *first_user) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1617 | { | 
 | 1618 | 	unsigned long flags; | 
 | 1619 | 	spin_lock_irqsave(&phy->lock, flags); | 
| Narayanan G | 5cd326f | 2011-11-30 19:20:42 +0530 | [diff] [blame] | 1620 |  | 
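 |  | 	/* Note whether this physical channel was entirely unused before this allocation */ | 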
 | 1621 | 	*first_user = ((phy->allocated_src | phy->allocated_dst) | 
 | 1622 | 			== D40_ALLOC_FREE); | 
 | 1623 |  | 
| Marcin Mielczarczyk | 4aed79b | 2010-05-18 00:41:21 +0200 | [diff] [blame] | 1624 | 	if (!is_log) { | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1625 | 		/* Physical interrupts are masked per physical full channel */ | 
 | 1626 | 		if (phy->allocated_src == D40_ALLOC_FREE && | 
 | 1627 | 		    phy->allocated_dst == D40_ALLOC_FREE) { | 
 | 1628 | 			phy->allocated_dst = D40_ALLOC_PHY; | 
 | 1629 | 			phy->allocated_src = D40_ALLOC_PHY; | 
 | 1630 | 			goto found; | 
 | 1631 | 		} else | 
 | 1632 | 			goto not_found; | 
 | 1633 | 	} | 
 | 1634 |  | 
 | 1635 | 	/* Logical channel */ | 
 | 1636 | 	if (is_src) { | 
 | 1637 | 		if (phy->allocated_src == D40_ALLOC_PHY) | 
 | 1638 | 			goto not_found; | 
 | 1639 |  | 
 | 1640 | 		if (phy->allocated_src == D40_ALLOC_FREE) | 
 | 1641 | 			phy->allocated_src = D40_ALLOC_LOG_FREE; | 
 | 1642 |  | 
 | 1643 | 		if (!(phy->allocated_src & (1 << log_event_line))) { | 
 | 1644 | 			phy->allocated_src |= 1 << log_event_line; | 
 | 1645 | 			goto found; | 
 | 1646 | 		} else | 
 | 1647 | 			goto not_found; | 
 | 1648 | 	} else { | 
 | 1649 | 		if (phy->allocated_dst == D40_ALLOC_PHY) | 
 | 1650 | 			goto not_found; | 
 | 1651 |  | 
 | 1652 | 		if (phy->allocated_dst == D40_ALLOC_FREE) | 
 | 1653 | 			phy->allocated_dst = D40_ALLOC_LOG_FREE; | 
 | 1654 |  | 
 | 1655 | 		if (!(phy->allocated_dst & (1 << log_event_line))) { | 
 | 1656 | 			phy->allocated_dst |= 1 << log_event_line; | 
 | 1657 | 			goto found; | 
 | 1658 | 		} else | 
 | 1659 | 			goto not_found; | 
 | 1660 | 	} | 
 | 1661 |  | 
 | 1662 | not_found: | 
 | 1663 | 	spin_unlock_irqrestore(&phy->lock, flags); | 
 | 1664 | 	return false; | 
 | 1665 | found: | 
 | 1666 | 	spin_unlock_irqrestore(&phy->lock, flags); | 
 | 1667 | 	return true; | 
 | 1668 | } | 
 | 1669 |  | 
 | 1670 | static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, | 
 | 1671 | 			       int log_event_line) | 
 | 1672 | { | 
 | 1673 | 	unsigned long flags; | 
 | 1674 | 	bool is_free = false; | 
 | 1675 |  | 
 | 1676 | 	spin_lock_irqsave(&phy->lock, flags); | 
 | 1677 | 	if (!log_event_line) { | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1678 | 		phy->allocated_dst = D40_ALLOC_FREE; | 
 | 1679 | 		phy->allocated_src = D40_ALLOC_FREE; | 
 | 1680 | 		is_free = true; | 
 | 1681 | 		goto out; | 
 | 1682 | 	} | 
 | 1683 |  | 
 | 1684 | 	/* Logical channel */ | 
 | 1685 | 	if (is_src) { | 
 | 1686 | 		phy->allocated_src &= ~(1 << log_event_line); | 
 | 1687 | 		if (phy->allocated_src == D40_ALLOC_LOG_FREE) | 
 | 1688 | 			phy->allocated_src = D40_ALLOC_FREE; | 
 | 1689 | 	} else { | 
 | 1690 | 		phy->allocated_dst &= ~(1 << log_event_line); | 
 | 1691 | 		if (phy->allocated_dst == D40_ALLOC_LOG_FREE) | 
 | 1692 | 			phy->allocated_dst = D40_ALLOC_FREE; | 
 | 1693 | 	} | 
 | 1694 |  | 
 | 1695 | 	is_free = ((phy->allocated_src | phy->allocated_dst) == | 
 | 1696 | 		   D40_ALLOC_FREE); | 
 | 1697 |  | 
 | 1698 | out: | 
 | 1699 | 	spin_unlock_irqrestore(&phy->lock, flags); | 
 | 1700 |  | 
 | 1701 | 	return is_free; | 
 | 1702 | } | 
 | 1703 |  | 
| Narayanan G | 5cd326f | 2011-11-30 19:20:42 +0530 | [diff] [blame] | 1704 | static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1705 | { | 
 | 1706 | 	int dev_type; | 
 | 1707 | 	int event_group; | 
 | 1708 | 	int event_line; | 
 | 1709 | 	struct d40_phy_res *phys; | 
 | 1710 | 	int i; | 
 | 1711 | 	int j; | 
 | 1712 | 	int log_num; | 
 | 1713 | 	bool is_src; | 
| Rabin Vincent | 38bdbf0 | 2010-10-12 13:00:51 +0000 | [diff] [blame] | 1714 | 	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1715 |  | 
 | 1716 | 	phys = d40c->base->phy_res; | 
 | 1717 |  | 
 | 1718 | 	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | 
 | 1719 | 		dev_type = d40c->dma_cfg.src_dev_type; | 
 | 1720 | 		log_num = 2 * dev_type; | 
 | 1721 | 		is_src = true; | 
 | 1722 | 	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 
 | 1723 | 		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 
 | 1724 | 		/* dst event lines are used for logical memcpy */ | 
 | 1725 | 		dev_type = d40c->dma_cfg.dst_dev_type; | 
 | 1726 | 		log_num = 2 * dev_type + 1; | 
 | 1727 | 		is_src = false; | 
 | 1728 | 	} else | 
 | 1729 | 		return -EINVAL; | 
 | 1730 |  | 
 | 1731 | 	event_group = D40_TYPE_TO_GROUP(dev_type); | 
 | 1732 | 	event_line = D40_TYPE_TO_EVENT(dev_type); | 
 | 1733 |  | 
 | 1734 | 	if (!is_log) { | 
 | 1735 | 		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 
 | 1736 | 			/* Find physical half channel */ | 
 | 1737 | 			for (i = 0; i < d40c->base->num_phy_chans; i++) { | 
 | 1738 |  | 
| Marcin Mielczarczyk | 4aed79b | 2010-05-18 00:41:21 +0200 | [diff] [blame] | 1739 | 				if (d40_alloc_mask_set(&phys[i], is_src, | 
| Narayanan G | 5cd326f | 2011-11-30 19:20:42 +0530 | [diff] [blame] | 1740 | 						       0, is_log, | 
 | 1741 | 						       first_phy_user)) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1742 | 					goto found_phy; | 
 | 1743 | 			} | 
 | 1744 | 		} else | 
 | 1745 | 			for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | 
 | 1746 | 				int phy_num = j  + event_group * 2; | 
 | 1747 | 				for (i = phy_num; i < phy_num + 2; i++) { | 
| Linus Walleij | 508849a | 2010-06-20 21:26:07 +0000 | [diff] [blame] | 1748 | 					if (d40_alloc_mask_set(&phys[i], | 
 | 1749 | 							       is_src, | 
 | 1750 | 							       0, | 
| Narayanan G | 5cd326f | 2011-11-30 19:20:42 +0530 | [diff] [blame] | 1751 | 							       is_log, | 
 | 1752 | 							       first_phy_user)) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1753 | 						goto found_phy; | 
 | 1754 | 				} | 
 | 1755 | 			} | 
 | 1756 | 		return -EINVAL; | 
 | 1757 | found_phy: | 
 | 1758 | 		d40c->phy_chan = &phys[i]; | 
 | 1759 | 		d40c->log_num = D40_PHY_CHAN; | 
 | 1760 | 		goto out; | 
 | 1761 | 	} | 
 | 1762 | 	if (dev_type == -1) | 
 | 1763 | 		return -EINVAL; | 
 | 1764 |  | 
 | 1765 | 	/* Find logical channel */ | 
 | 1766 | 	for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | 
 | 1767 | 		int phy_num = j + event_group * 2; | 
| Narayanan G | 5cd326f | 2011-11-30 19:20:42 +0530 | [diff] [blame] | 1768 |  | 
 | 1769 | 		if (d40c->dma_cfg.use_fixed_channel) { | 
 | 1770 | 			i = d40c->dma_cfg.phy_channel; | 
 | 1771 |  | 
 | 1772 | 			if ((i != phy_num) && (i != phy_num + 1)) { | 
 | 1773 | 				dev_err(chan2dev(d40c), | 
 | 1774 | 					"invalid fixed phy channel %d\n", i); | 
 | 1775 | 				return -EINVAL; | 
 | 1776 | 			} | 
 | 1777 |  | 
 | 1778 | 			if (d40_alloc_mask_set(&phys[i], is_src, event_line, | 
 | 1779 | 					       is_log, first_phy_user)) | 
 | 1780 | 				goto found_log; | 
 | 1781 |  | 
 | 1782 | 			dev_err(chan2dev(d40c), | 
 | 1783 | 				"could not allocate fixed phy channel %d\n", i); | 
 | 1784 | 			return -EINVAL; | 
 | 1785 | 		} | 
 | 1786 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1787 | 		/* | 
 | 1788 | 		 * Spread logical channels across all available physical | 
 | 1789 | 		 * channels rather than packing every logical channel onto | 
 | 1790 | 		 * the first available phy channels. | 
 | 1791 | 		 */ | 
 | 1792 | 		if (is_src) { | 
 | 1793 | 			for (i = phy_num; i < phy_num + 2; i++) { | 
 | 1794 | 				if (d40_alloc_mask_set(&phys[i], is_src, | 
| Narayanan G | 5cd326f | 2011-11-30 19:20:42 +0530 | [diff] [blame] | 1795 | 						       event_line, is_log, | 
 | 1796 | 						       first_phy_user)) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1797 | 					goto found_log; | 
 | 1798 | 			} | 
 | 1799 | 		} else { | 
 | 1800 | 			for (i = phy_num + 1; i >= phy_num; i--) { | 
 | 1801 | 				if (d40_alloc_mask_set(&phys[i], is_src, | 
| Narayanan G | 5cd326f | 2011-11-30 19:20:42 +0530 | [diff] [blame] | 1802 | 						       event_line, is_log, | 
 | 1803 | 						       first_phy_user)) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1804 | 					goto found_log; | 
 | 1805 | 			} | 
 | 1806 | 		} | 
 | 1807 | 	} | 
 | 1808 | 	return -EINVAL; | 
 | 1809 |  | 
 | 1810 | found_log: | 
 | 1811 | 	d40c->phy_chan = &phys[i]; | 
 | 1812 | 	d40c->log_num = log_num; | 
 | 1813 | out: | 
 | 1814 |  | 
 | 1815 | 	if (is_log) | 
 | 1816 | 		d40c->base->lookup_log_chans[d40c->log_num] = d40c; | 
 | 1817 | 	else | 
 | 1818 | 		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; | 
 | 1819 |  | 
 | 1820 | 	return 0; | 
 | 1821 |  | 
 | 1822 | } | 
 | 1823 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1824 | static int d40_config_memcpy(struct d40_chan *d40c) | 
 | 1825 | { | 
 | 1826 | 	dma_cap_mask_t cap = d40c->chan.device->cap_mask; | 
 | 1827 |  | 
 | 1828 | 	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { | 
 | 1829 | 		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log; | 
 | 1830 | 		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY; | 
 | 1831 | 		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data-> | 
 | 1832 | 			memcpy[d40c->chan.chan_id]; | 
 | 1833 |  | 
 | 1834 | 	} else if (dma_has_cap(DMA_MEMCPY, cap) && | 
 | 1835 | 		   dma_has_cap(DMA_SLAVE, cap)) { | 
 | 1836 | 		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; | 
 | 1837 | 	} else { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1838 | 		chan_err(d40c, "No memcpy\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1839 | 		return -EINVAL; | 
 | 1840 | 	} | 
 | 1841 |  | 
 | 1842 | 	return 0; | 
 | 1843 | } | 
 | 1844 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1845 | static int d40_free_dma(struct d40_chan *d40c) | 
 | 1846 | { | 
 | 1847 |  | 
 | 1848 | 	int res = 0; | 
| Jonas Aaberg | d181b3a | 2010-06-20 21:26:38 +0000 | [diff] [blame] | 1849 | 	u32 event; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1850 | 	struct d40_phy_res *phy = d40c->phy_chan; | 
 | 1851 | 	bool is_src; | 
 | 1852 |  | 
 | 1853 | 	/* Terminate all queued and active transfers */ | 
 | 1854 | 	d40_term_all(d40c); | 
 | 1855 |  | 
 | 1856 | 	if (phy == NULL) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1857 | 		chan_err(d40c, "phy == null\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1858 | 		return -EINVAL; | 
 | 1859 | 	} | 
 | 1860 |  | 
 | 1861 | 	if (phy->allocated_src == D40_ALLOC_FREE && | 
 | 1862 | 	    phy->allocated_dst == D40_ALLOC_FREE) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1863 | 		chan_err(d40c, "channel already free\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1864 | 		return -EINVAL; | 
 | 1865 | 	} | 
 | 1866 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1867 | 	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 
 | 1868 | 	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 
 | 1869 | 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1870 | 		is_src = false; | 
 | 1871 | 	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | 
 | 1872 | 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1873 | 		is_src = true; | 
 | 1874 | 	} else { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1875 | 		chan_err(d40c, "Unknown direction\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1876 | 		return -EINVAL; | 
 | 1877 | 	} | 
 | 1878 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 1879 | 	pm_runtime_get_sync(d40c->base->dev); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1880 | 	res = d40_channel_execute_command(d40c, D40_DMA_STOP); | 
 | 1881 | 	if (res) { | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1882 | 		chan_err(d40c, "stop failed\n"); | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 1883 | 		goto out; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1884 | 	} | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 1885 |  | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 1886 | 	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0); | 
 | 1887 |  | 
 | 1888 | 	if (chan_is_logical(d40c)) | 
 | 1889 | 		d40c->base->lookup_log_chans[d40c->log_num] = NULL; | 
 | 1890 | 	else | 
 | 1891 | 		d40c->base->lookup_phy_chans[phy->num] = NULL; | 
 | 1892 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 1893 | 	if (d40c->busy) { | 
 | 1894 | 		pm_runtime_mark_last_busy(d40c->base->dev); | 
 | 1895 | 		pm_runtime_put_autosuspend(d40c->base->dev); | 
 | 1896 | 	} | 
 | 1897 |  | 
 | 1898 | 	d40c->busy = false; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1899 | 	d40c->phy_chan = NULL; | 
| Rabin Vincent | ce2ca12 | 2010-10-12 13:00:49 +0000 | [diff] [blame] | 1900 | 	d40c->configured = false; | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 1901 | out: | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1902 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 1903 | 	pm_runtime_mark_last_busy(d40c->base->dev); | 
 | 1904 | 	pm_runtime_put_autosuspend(d40c->base->dev); | 
 | 1905 | 	return res; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1906 | } | 
 | 1907 |  | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 1908 | static bool d40_is_paused(struct d40_chan *d40c) | 
 | 1909 | { | 
| Rabin Vincent | 8ca8468 | 2011-01-25 11:18:07 +0100 | [diff] [blame] | 1910 | 	void __iomem *chanbase = chan_base(d40c); | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 1911 | 	bool is_paused = false; | 
 | 1912 | 	unsigned long flags; | 
 | 1913 | 	void __iomem *active_reg; | 
 | 1914 | 	u32 status; | 
 | 1915 | 	u32 event; | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 1916 |  | 
 | 1917 | 	spin_lock_irqsave(&d40c->lock, flags); | 
 | 1918 |  | 
| Rabin Vincent | 724a857 | 2011-01-25 11:18:08 +0100 | [diff] [blame] | 1919 | 	if (chan_is_physical(d40c)) { | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 1920 | 		if (d40c->phy_chan->num % 2 == 0) | 
 | 1921 | 			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | 
 | 1922 | 		else | 
 | 1923 | 			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; | 
 | 1924 |  | 
 | 1925 | 		status = (readl(active_reg) & | 
 | 1926 | 			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | 
 | 1927 | 			D40_CHAN_POS(d40c->phy_chan->num); | 
 | 1928 | 		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) | 
 | 1929 | 			is_paused = true; | 
 | 1930 |  | 
 | 1931 | 		goto _exit; | 
 | 1932 | 	} | 
 | 1933 |  | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 1934 | 	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 
| Jonas Aaberg | 9dbfbd35c | 2010-08-09 12:08:41 +0000 | [diff] [blame] | 1935 | 	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 1936 | 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 
| Rabin Vincent | 8ca8468 | 2011-01-25 11:18:07 +0100 | [diff] [blame] | 1937 | 		status = readl(chanbase + D40_CHAN_REG_SDLNK); | 
| Jonas Aaberg | 9dbfbd35c | 2010-08-09 12:08:41 +0000 | [diff] [blame] | 1938 | 	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 1939 | 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 
| Rabin Vincent | 8ca8468 | 2011-01-25 11:18:07 +0100 | [diff] [blame] | 1940 | 		status = readl(chanbase + D40_CHAN_REG_SSLNK); | 
| Jonas Aaberg | 9dbfbd35c | 2010-08-09 12:08:41 +0000 | [diff] [blame] | 1941 | 	} else { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 1942 | 		chan_err(d40c, "Unknown direction\n"); | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 1943 | 		goto _exit; | 
 | 1944 | 	} | 
| Jonas Aaberg | 9dbfbd35c | 2010-08-09 12:08:41 +0000 | [diff] [blame] | 1945 |  | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 1946 | 	status = (status & D40_EVENTLINE_MASK(event)) >> | 
 | 1947 | 		D40_EVENTLINE_POS(event); | 
 | 1948 |  | 
 | 1949 | 	if (status != D40_DMA_RUN) | 
 | 1950 | 		is_paused = true; | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 1951 | _exit: | 
 | 1952 | 	spin_unlock_irqrestore(&d40c->lock, flags); | 
 | 1953 | 	return is_paused; | 
 | 1954 |  | 
 | 1955 | } | 
 | 1956 |  | 
 | 1957 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1958 | static u32 stedma40_residue(struct dma_chan *chan) | 
 | 1959 | { | 
 | 1960 | 	struct d40_chan *d40c = | 
 | 1961 | 		container_of(chan, struct d40_chan, chan); | 
 | 1962 | 	u32 bytes_left; | 
 | 1963 | 	unsigned long flags; | 
 | 1964 |  | 
 | 1965 | 	spin_lock_irqsave(&d40c->lock, flags); | 
 | 1966 | 	bytes_left = d40_residue(d40c); | 
 | 1967 | 	spin_unlock_irqrestore(&d40c->lock, flags); | 
 | 1968 |  | 
 | 1969 | 	return bytes_left; | 
 | 1970 | } | 
 | 1971 |  | 
| Rabin Vincent | 3e3a076 | 2011-01-25 11:18:21 +0100 | [diff] [blame] | 1972 | static int | 
 | 1973 | d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, | 
 | 1974 | 		struct scatterlist *sg_src, struct scatterlist *sg_dst, | 
| Rabin Vincent | 822c567 | 2011-01-25 11:18:28 +0100 | [diff] [blame] | 1975 | 		unsigned int sg_len, dma_addr_t src_dev_addr, | 
 | 1976 | 		dma_addr_t dst_dev_addr) | 
| Rabin Vincent | 3e3a076 | 2011-01-25 11:18:21 +0100 | [diff] [blame] | 1977 | { | 
 | 1978 | 	struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | 
 | 1979 | 	struct stedma40_half_channel_info *src_info = &cfg->src_info; | 
 | 1980 | 	struct stedma40_half_channel_info *dst_info = &cfg->dst_info; | 
| Rabin Vincent | 5ed04b8 | 2011-01-25 11:18:26 +0100 | [diff] [blame] | 1981 | 	int ret; | 
| Rabin Vincent | 3e3a076 | 2011-01-25 11:18:21 +0100 | [diff] [blame] | 1982 |  | 
| Rabin Vincent | 5ed04b8 | 2011-01-25 11:18:26 +0100 | [diff] [blame] | 1983 | 	ret = d40_log_sg_to_lli(sg_src, sg_len, | 
 | 1984 | 				src_dev_addr, | 
 | 1985 | 				desc->lli_log.src, | 
 | 1986 | 				chan->log_def.lcsp1, | 
 | 1987 | 				src_info->data_width, | 
 | 1988 | 				dst_info->data_width); | 
| Rabin Vincent | 3e3a076 | 2011-01-25 11:18:21 +0100 | [diff] [blame] | 1989 |  | 
| Rabin Vincent | 5ed04b8 | 2011-01-25 11:18:26 +0100 | [diff] [blame] | 1990 | 	ret = d40_log_sg_to_lli(sg_dst, sg_len, | 
 | 1991 | 				dst_dev_addr, | 
 | 1992 | 				desc->lli_log.dst, | 
 | 1993 | 				chan->log_def.lcsp3, | 
 | 1994 | 				dst_info->data_width, | 
 | 1995 | 				src_info->data_width); | 
| Rabin Vincent | 3e3a076 | 2011-01-25 11:18:21 +0100 | [diff] [blame] | 1996 |  | 
| Rabin Vincent | 5ed04b8 | 2011-01-25 11:18:26 +0100 | [diff] [blame] | 1997 | 	return ret < 0 ? ret : 0; | 
| Rabin Vincent | 3e3a076 | 2011-01-25 11:18:21 +0100 | [diff] [blame] | 1998 | } | 
 | 1999 |  | 
 | 2000 | static int | 
 | 2001 | d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, | 
 | 2002 | 		struct scatterlist *sg_src, struct scatterlist *sg_dst, | 
| Rabin Vincent | 822c567 | 2011-01-25 11:18:28 +0100 | [diff] [blame] | 2003 | 		unsigned int sg_len, dma_addr_t src_dev_addr, | 
 | 2004 | 		dma_addr_t dst_dev_addr) | 
| Rabin Vincent | 3e3a076 | 2011-01-25 11:18:21 +0100 | [diff] [blame] | 2005 | { | 
| Rabin Vincent | 3e3a076 | 2011-01-25 11:18:21 +0100 | [diff] [blame] | 2006 | 	struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | 
 | 2007 | 	struct stedma40_half_channel_info *src_info = &cfg->src_info; | 
 | 2008 | 	struct stedma40_half_channel_info *dst_info = &cfg->dst_info; | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 2009 | 	unsigned long flags = 0; | 
| Rabin Vincent | 3e3a076 | 2011-01-25 11:18:21 +0100 | [diff] [blame] | 2010 | 	int ret; | 
 | 2011 |  | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 2012 | 	if (desc->cyclic) | 
 | 2013 | 		flags |= LLI_CYCLIC | LLI_TERM_INT; | 
 | 2014 |  | 
| Rabin Vincent | 3e3a076 | 2011-01-25 11:18:21 +0100 | [diff] [blame] | 2015 | 	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, | 
 | 2016 | 				desc->lli_phy.src, | 
 | 2017 | 				virt_to_phys(desc->lli_phy.src), | 
 | 2018 | 				chan->src_def_cfg, | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 2019 | 				src_info, dst_info, flags); | 
| Rabin Vincent | 3e3a076 | 2011-01-25 11:18:21 +0100 | [diff] [blame] | 2020 |  | 
 | 2021 | 	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, | 
 | 2022 | 				desc->lli_phy.dst, | 
 | 2023 | 				virt_to_phys(desc->lli_phy.dst), | 
 | 2024 | 				chan->dst_def_cfg, | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 2025 | 				dst_info, src_info, flags); | 
| Rabin Vincent | 3e3a076 | 2011-01-25 11:18:21 +0100 | [diff] [blame] | 2026 |  | 
 | 2027 | 	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, | 
 | 2028 | 				   desc->lli_pool.size, DMA_TO_DEVICE); | 
 | 2029 |  | 
 | 2030 | 	return ret < 0 ? ret : 0; | 
 | 2031 | } | 
 | 2032 |  | 
 | 2033 |  | 
| Rabin Vincent | 5f81158 | 2011-01-25 11:18:18 +0100 | [diff] [blame] | 2034 | static struct d40_desc * | 
 | 2035 | d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, | 
 | 2036 | 	      unsigned int sg_len, unsigned long dma_flags) | 
 | 2037 | { | 
 | 2038 | 	struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | 
 | 2039 | 	struct d40_desc *desc; | 
| Rabin Vincent | dbd8878 | 2011-01-25 11:18:19 +0100 | [diff] [blame] | 2040 | 	int ret; | 
| Rabin Vincent | 5f81158 | 2011-01-25 11:18:18 +0100 | [diff] [blame] | 2041 |  | 
 | 2042 | 	desc = d40_desc_get(chan); | 
 | 2043 | 	if (!desc) | 
 | 2044 | 		return NULL; | 
 | 2045 |  | 
 | 2046 | 	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, | 
 | 2047 | 					cfg->dst_info.data_width); | 
 | 2048 | 	if (desc->lli_len < 0) { | 
 | 2049 | 		chan_err(chan, "Unaligned size\n"); | 
| Rabin Vincent | dbd8878 | 2011-01-25 11:18:19 +0100 | [diff] [blame] | 2050 | 		goto err; | 
| Rabin Vincent | 5f81158 | 2011-01-25 11:18:18 +0100 | [diff] [blame] | 2051 | 	} | 
 | 2052 |  | 
| Rabin Vincent | dbd8878 | 2011-01-25 11:18:19 +0100 | [diff] [blame] | 2053 | 	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); | 
 | 2054 | 	if (ret < 0) { | 
 | 2055 | 		chan_err(chan, "Could not allocate lli\n"); | 
 | 2056 | 		goto err; | 
 | 2057 | 	} | 
 | 2058 |  | 
 | 2059 |  | 
| Rabin Vincent | 5f81158 | 2011-01-25 11:18:18 +0100 | [diff] [blame] | 2060 | 	desc->lli_current = 0; | 
 | 2061 | 	desc->txd.flags = dma_flags; | 
 | 2062 | 	desc->txd.tx_submit = d40_tx_submit; | 
 | 2063 |  | 
 | 2064 | 	dma_async_tx_descriptor_init(&desc->txd, &chan->chan); | 
 | 2065 |  | 
 | 2066 | 	return desc; | 
| Rabin Vincent | dbd8878 | 2011-01-25 11:18:19 +0100 | [diff] [blame] | 2067 |  | 
 | 2068 | err: | 
 | 2069 | 	d40_desc_free(chan, desc); | 
 | 2070 | 	return NULL; | 
| Rabin Vincent | 5f81158 | 2011-01-25 11:18:18 +0100 | [diff] [blame] | 2071 | } | 
 | 2072 |  | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2073 | static dma_addr_t | 
| Vinod Koul | db8196d | 2011-10-13 22:34:23 +0530 | [diff] [blame] | 2074 | d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2075 | { | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2076 | 	struct stedma40_platform_data *plat = chan->base->plat_data; | 
 | 2077 | 	struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | 
| Philippe Langlais | 711b9ce | 2011-05-07 17:09:43 +0200 | [diff] [blame] | 2078 | 	dma_addr_t addr = 0; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2079 |  | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2080 | 	if (chan->runtime_addr) | 
 | 2081 | 		return chan->runtime_addr; | 
 | 2082 |  | 
| Vinod Koul | db8196d | 2011-10-13 22:34:23 +0530 | [diff] [blame] | 2083 | 	if (direction == DMA_DEV_TO_MEM) | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2084 | 		addr = plat->dev_rx[cfg->src_dev_type]; | 
| Vinod Koul | db8196d | 2011-10-13 22:34:23 +0530 | [diff] [blame] | 2085 | 	else if (direction == DMA_MEM_TO_DEV) | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2086 | 		addr = plat->dev_tx[cfg->dst_dev_type]; | 
 | 2087 |  | 
 | 2088 | 	return addr; | 
 | 2089 | } | 
 | 2090 |  | 
 | 2091 | static struct dma_async_tx_descriptor * | 
 | 2092 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | 
 | 2093 | 	    struct scatterlist *sg_dst, unsigned int sg_len, | 
| Vinod Koul | db8196d | 2011-10-13 22:34:23 +0530 | [diff] [blame] | 2094 | 	    enum dma_transfer_direction direction, unsigned long dma_flags) | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2095 | { | 
 | 2096 | 	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); | 
| Rabin Vincent | 822c567 | 2011-01-25 11:18:28 +0100 | [diff] [blame] | 2097 | 	dma_addr_t src_dev_addr = 0; | 
 | 2098 | 	dma_addr_t dst_dev_addr = 0; | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2099 | 	struct d40_desc *desc; | 
 | 2100 | 	unsigned long flags; | 
 | 2101 | 	int ret; | 
 | 2102 |  | 
 | 2103 | 	if (!chan->phy_chan) { | 
 | 2104 | 		chan_err(chan, "Cannot prepare unallocated channel\n"); | 
 | 2105 | 		return NULL; | 
| Jonas Aaberg | 0d0f6b8 | 2010-06-20 21:25:31 +0000 | [diff] [blame] | 2106 | 	} | 
 | 2107 |  | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2109 | 	spin_lock_irqsave(&chan->lock, flags); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2110 |  | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2111 | 	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); | 
 | 2112 | 	if (desc == NULL) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2113 | 		goto err; | 
 | 2114 |  | 
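 |  | 	/* A scatterlist whose last entry links back to the first marks a cyclic transfer */ | 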
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 2115 | 	if (sg_next(&sg_src[sg_len - 1]) == sg_src) | 
 | 2116 | 		desc->cyclic = true; | 
 | 2117 |  | 
| Linus Walleij | 7e426da | 2012-04-12 18:12:52 +0200 | [diff] [blame] | 2118 | 	if (direction != DMA_TRANS_NONE) { | 
| Rabin Vincent | 822c567 | 2011-01-25 11:18:28 +0100 | [diff] [blame] | 2119 | 		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); | 
 | 2120 |  | 
| Vinod Koul | db8196d | 2011-10-13 22:34:23 +0530 | [diff] [blame] | 2121 | 		if (direction == DMA_DEV_TO_MEM) | 
| Rabin Vincent | 822c567 | 2011-01-25 11:18:28 +0100 | [diff] [blame] | 2122 | 			src_dev_addr = dev_addr; | 
| Vinod Koul | db8196d | 2011-10-13 22:34:23 +0530 | [diff] [blame] | 2123 | 		else if (direction == DMA_MEM_TO_DEV) | 
| Rabin Vincent | 822c567 | 2011-01-25 11:18:28 +0100 | [diff] [blame] | 2124 | 			dst_dev_addr = dev_addr; | 
 | 2125 | 	} | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2126 |  | 
 | 2127 | 	if (chan_is_logical(chan)) | 
 | 2128 | 		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, | 
| Rabin Vincent | 822c567 | 2011-01-25 11:18:28 +0100 | [diff] [blame] | 2129 | 				      sg_len, src_dev_addr, dst_dev_addr); | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2130 | 	else | 
 | 2131 | 		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, | 
| Rabin Vincent | 822c567 | 2011-01-25 11:18:28 +0100 | [diff] [blame] | 2132 | 				      sg_len, src_dev_addr, dst_dev_addr); | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2133 |  | 
 | 2134 | 	if (ret) { | 
 | 2135 | 		chan_err(chan, "Failed to prepare %s sg job: %d\n", | 
 | 2136 | 			 chan_is_logical(chan) ? "log" : "phy", ret); | 
 | 2137 | 		goto err; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2138 | 	} | 
 | 2139 |  | 
| Per Forlin | 82babbb36 | 2011-08-29 13:33:35 +0200 | [diff] [blame] | 2140 | 	/* | 
 | 2141 | 	 * Add the descriptor to the prepare queue so that it can be | 
 | 2142 | 	 * freed later in terminate_all. | 
 | 2143 | 	 */ | 
 | 2144 | 	list_add_tail(&desc->node, &chan->prepare_queue); | 
 | 2145 |  | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2146 | 	spin_unlock_irqrestore(&chan->lock, flags); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2147 |  | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2148 | 	return &desc->txd; | 
 | 2149 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2150 | err: | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2151 | 	if (desc) | 
 | 2152 | 		d40_desc_free(chan, desc); | 
 | 2153 | 	spin_unlock_irqrestore(&chan->lock, flags); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2154 | 	return NULL; | 
 | 2155 | } | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2156 |  | 
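 |  | /* | 
 |  |  * Channel filter function (used with dma_request_channel): validate the | 
 |  |  * requested channel configuration against this channel, or fall back to | 
 |  |  * the memcpy defaults when no configuration is supplied. | 
 |  |  */ | 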
 | 2157 | bool stedma40_filter(struct dma_chan *chan, void *data) | 
 | 2158 | { | 
 | 2159 | 	struct stedma40_chan_cfg *info = data; | 
 | 2160 | 	struct d40_chan *d40c = | 
 | 2161 | 		container_of(chan, struct d40_chan, chan); | 
 | 2162 | 	int err; | 
 | 2163 |  | 
 | 2164 | 	if (data) { | 
 | 2165 | 		err = d40_validate_conf(d40c, info); | 
 | 2166 | 		if (!err) | 
 | 2167 | 			d40c->dma_cfg = *info; | 
 | 2168 | 	} else | 
 | 2169 | 		err = d40_config_memcpy(d40c); | 
 | 2170 |  | 
| Rabin Vincent | ce2ca12 | 2010-10-12 13:00:49 +0000 | [diff] [blame] | 2171 | 	if (!err) | 
 | 2172 | 		d40c->configured = true; | 
 | 2173 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2174 | 	return err == 0; | 
 | 2175 | } | 
 | 2176 | EXPORT_SYMBOL(stedma40_filter); | 
 | 2177 |  | 
| Rabin Vincent | ac2c0a3 | 2011-01-25 11:18:11 +0100 | [diff] [blame] | 2178 | static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) | 
 | 2179 | { | 
 | 2180 | 	bool realtime = d40c->dma_cfg.realtime; | 
 | 2181 | 	bool highprio = d40c->dma_cfg.high_priority; | 
 | 2182 | 	u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1; | 
 | 2183 | 	u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1; | 
 | 2184 | 	u32 event = D40_TYPE_TO_EVENT(dev_type); | 
 | 2185 | 	u32 group = D40_TYPE_TO_GROUP(dev_type); | 
 | 2186 | 	u32 bit = 1 << event; | 
 | 2187 |  | 
 | 2188 | 	/* Destination event lines are stored in the upper halfword */ | 
 | 2189 | 	if (!src) | 
 | 2190 | 		bit <<= 16; | 
 | 2191 |  | 
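 |  | 	/* | 
 |  | 	 * Each event group has its own priority and realtime registers; | 
 |  | 	 * set the bit for this event line in both. | 
 |  | 	 */ | 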
 | 2192 | 	writel(bit, d40c->base->virtbase + prioreg + group * 4); | 
 | 2193 | 	writel(bit, d40c->base->virtbase + rtreg + group * 4); | 
 | 2194 | } | 
 | 2195 |  | 
 | 2196 | static void d40_set_prio_realtime(struct d40_chan *d40c) | 
 | 2197 | { | 
 | 2198 | 	if (d40c->base->rev < 3) | 
 | 2199 | 		return; | 
 | 2200 |  | 
 | 2201 | 	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | 
 | 2202 | 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | 
 | 2203 | 		__d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true); | 
 | 2204 |  | 
 | 2205 | 	if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || | 
 | 2206 | 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | 
 | 2207 | 		__d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false); | 
 | 2208 | } | 
 | 2209 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2210 | /* DMA ENGINE functions */ | 
 | 2211 | static int d40_alloc_chan_resources(struct dma_chan *chan) | 
 | 2212 | { | 
 | 2213 | 	int err; | 
 | 2214 | 	unsigned long flags; | 
 | 2215 | 	struct d40_chan *d40c = | 
 | 2216 | 		container_of(chan, struct d40_chan, chan); | 
| Linus Walleij | ef1872e | 2010-06-20 21:24:52 +0000 | [diff] [blame] | 2217 | 	bool is_free_phy; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2218 | 	spin_lock_irqsave(&d40c->lock, flags); | 
 | 2219 |  | 
| Russell King - ARM Linux | d3ee98cdc | 2012-03-06 22:35:47 +0000 | [diff] [blame] | 2220 | 	dma_cookie_init(chan); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2221 |  | 
| Rabin Vincent | ce2ca12 | 2010-10-12 13:00:49 +0000 | [diff] [blame] | 2222 | 	/* If no dma configuration is set use default configuration (memcpy) */ | 
 | 2223 | 	if (!d40c->configured) { | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2224 | 		err = d40_config_memcpy(d40c); | 
| Jonas Aaberg | ff0b12b | 2010-06-20 21:25:15 +0000 | [diff] [blame] | 2225 | 		if (err) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 2226 | 			chan_err(d40c, "Failed to configure memcpy channel\n"); | 
| Jonas Aaberg | ff0b12b | 2010-06-20 21:25:15 +0000 | [diff] [blame] | 2227 | 			goto fail; | 
 | 2228 | 		} | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2229 | 	} | 
 | 2230 |  | 
| Narayanan G | 5cd326f | 2011-11-30 19:20:42 +0530 | [diff] [blame] | 2231 | 	err = d40_allocate_channel(d40c, &is_free_phy); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2232 | 	if (err) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 2233 | 		chan_err(d40c, "Failed to allocate channel\n"); | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 2234 | 		d40c->configured = false; | 
| Jonas Aaberg | ff0b12b | 2010-06-20 21:25:15 +0000 | [diff] [blame] | 2235 | 		goto fail; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2236 | 	} | 
 | 2237 |  | 
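 |  | 	/* | 
 |  | 	 * Keep the controller powered while the channel registers are set up; | 
 |  | 	 * the runtime PM reference is dropped again at the "fail" label below. | 
 |  | 	 */ | 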
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 2238 | 	pm_runtime_get_sync(d40c->base->dev); | 
| Linus Walleij | ef1872e | 2010-06-20 21:24:52 +0000 | [diff] [blame] | 2239 | 	/* Fill in basic CFG register values */ | 
 | 2240 | 	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, | 
| Rabin Vincent | 724a857 | 2011-01-25 11:18:08 +0100 | [diff] [blame] | 2241 | 		    &d40c->dst_def_cfg, chan_is_logical(d40c)); | 
| Linus Walleij | ef1872e | 2010-06-20 21:24:52 +0000 | [diff] [blame] | 2242 |  | 
| Rabin Vincent | ac2c0a3 | 2011-01-25 11:18:11 +0100 | [diff] [blame] | 2243 | 	d40_set_prio_realtime(d40c); | 
 | 2244 |  | 
| Rabin Vincent | 724a857 | 2011-01-25 11:18:08 +0100 | [diff] [blame] | 2245 | 	if (chan_is_logical(d40c)) { | 
| Linus Walleij | ef1872e | 2010-06-20 21:24:52 +0000 | [diff] [blame] | 2246 | 		d40_log_cfg(&d40c->dma_cfg, | 
 | 2247 | 			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | 
 | 2248 |  | 
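 |  | 		/* | 
 |  | 		 * Each logical channel uses a slice of the LCPA, indexed by the | 
 |  | 		 * source event type for RX and by the destination event type | 
 |  | 		 * (at a fixed extra offset) for TX. | 
 |  | 		 */ | 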
 | 2249 | 		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) | 
 | 2250 | 			d40c->lcpa = d40c->base->lcpa_base + | 
 | 2251 | 			  d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; | 
 | 2252 | 		else | 
 | 2253 | 			d40c->lcpa = d40c->base->lcpa_base + | 
 | 2254 | 			  d40c->dma_cfg.dst_dev_type * | 
 | 2255 | 			  D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; | 
 | 2256 | 	} | 
 | 2257 |  | 
| Narayanan G | 5cd326f | 2011-11-30 19:20:42 +0530 | [diff] [blame] | 2258 | 	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", | 
 | 2259 | 		 chan_is_logical(d40c) ? "logical" : "physical", | 
 | 2260 | 		 d40c->phy_chan->num, | 
 | 2261 | 		 d40c->dma_cfg.use_fixed_channel ? ", fixed" : ""); | 
 | 2262 |  | 
| Linus Walleij | ef1872e | 2010-06-20 21:24:52 +0000 | [diff] [blame] | 2264 | 	/* | 
 | 2265 | 	 * Only write channel configuration to the DMA if the physical | 
 | 2266 | 	 * resource is free. In case of multiple logical channels | 
 | 2267 | 	 * on the same physical resource, only the first write is necessary. | 
 | 2268 | 	 */ | 
| Jonas Aaberg | b55912c | 2010-08-09 12:08:02 +0000 | [diff] [blame] | 2269 | 	if (is_free_phy) | 
 | 2270 | 		d40_config_write(d40c); | 
| Jonas Aaberg | ff0b12b | 2010-06-20 21:25:15 +0000 | [diff] [blame] | 2271 | fail: | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 2272 | 	pm_runtime_mark_last_busy(d40c->base->dev); | 
 | 2273 | 	pm_runtime_put_autosuspend(d40c->base->dev); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2274 | 	spin_unlock_irqrestore(&d40c->lock, flags); | 
| Jonas Aaberg | ff0b12b | 2010-06-20 21:25:15 +0000 | [diff] [blame] | 2275 | 	return err; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2276 | } | 
 | 2277 |  | 
 | 2278 | static void d40_free_chan_resources(struct dma_chan *chan) | 
 | 2279 | { | 
 | 2280 | 	struct d40_chan *d40c = | 
 | 2281 | 		container_of(chan, struct d40_chan, chan); | 
 | 2282 | 	int err; | 
 | 2283 | 	unsigned long flags; | 
 | 2284 |  | 
| Jonas Aaberg | 0d0f6b8 | 2010-06-20 21:25:31 +0000 | [diff] [blame] | 2285 | 	if (d40c->phy_chan == NULL) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 2286 | 		chan_err(d40c, "Cannot free unallocated channel\n"); | 
| Jonas Aaberg | 0d0f6b8 | 2010-06-20 21:25:31 +0000 | [diff] [blame] | 2287 | 		return; | 
 | 2288 | 	} | 
 | 2289 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2291 | 	spin_lock_irqsave(&d40c->lock, flags); | 
 | 2292 |  | 
 | 2293 | 	err = d40_free_dma(d40c); | 
 | 2294 |  | 
 | 2295 | 	if (err) | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 2296 | 		chan_err(d40c, "Failed to free channel\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2297 | 	spin_unlock_irqrestore(&d40c->lock, flags); | 
 | 2298 | } | 
 | 2299 |  | 
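 |  | /* | 
 |  |  * Single-block memcpy: wrap the flat src/dst addresses in one-entry | 
 |  |  * scatterlists and reuse the generic sg preparation path. | 
 |  |  */ | 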
 | 2300 | static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | 
 | 2301 | 						       dma_addr_t dst, | 
 | 2302 | 						       dma_addr_t src, | 
 | 2303 | 						       size_t size, | 
| Jonas Aaberg | 2a61434 | 2010-06-20 21:25:24 +0000 | [diff] [blame] | 2304 | 						       unsigned long dma_flags) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2305 | { | 
| Rabin Vincent | 95944c6 | 2011-01-25 11:18:17 +0100 | [diff] [blame] | 2306 | 	struct scatterlist dst_sg; | 
 | 2307 | 	struct scatterlist src_sg; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2308 |  | 
| Rabin Vincent | 95944c6 | 2011-01-25 11:18:17 +0100 | [diff] [blame] | 2309 | 	sg_init_table(&dst_sg, 1); | 
 | 2310 | 	sg_init_table(&src_sg, 1); | 
| Jonas Aaberg | 0d0f6b8 | 2010-06-20 21:25:31 +0000 | [diff] [blame] | 2311 |  | 
| Rabin Vincent | 95944c6 | 2011-01-25 11:18:17 +0100 | [diff] [blame] | 2312 | 	sg_dma_address(&dst_sg) = dst; | 
 | 2313 | 	sg_dma_address(&src_sg) = src; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2314 |  | 
| Rabin Vincent | 95944c6 | 2011-01-25 11:18:17 +0100 | [diff] [blame] | 2315 | 	sg_dma_len(&dst_sg) = size; | 
 | 2316 | 	sg_dma_len(&src_sg) = size; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2317 |  | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2318 | 	return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2319 | } | 
 | 2320 |  | 
| Ira Snyder | 0d68866 | 2010-09-30 11:46:47 +0000 | [diff] [blame] | 2321 | static struct dma_async_tx_descriptor * | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2322 | d40_prep_memcpy_sg(struct dma_chan *chan, | 
 | 2323 | 		   struct scatterlist *dst_sg, unsigned int dst_nents, | 
 | 2324 | 		   struct scatterlist *src_sg, unsigned int src_nents, | 
 | 2325 | 		   unsigned long dma_flags) | 
| Ira Snyder | 0d68866 | 2010-09-30 11:46:47 +0000 | [diff] [blame] | 2326 | { | 
 | 2327 | 	if (dst_nents != src_nents) | 
 | 2328 | 		return NULL; | 
 | 2329 |  | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2330 | 	return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); | 
| Rabin Vincent | 00ac034 | 2011-01-25 11:18:20 +0100 | [diff] [blame] | 2331 | } | 
 | 2332 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2333 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | 
 | 2334 | 							 struct scatterlist *sgl, | 
 | 2335 | 							 unsigned int sg_len, | 
| Vinod Koul | db8196d | 2011-10-13 22:34:23 +0530 | [diff] [blame] | 2336 | 							 enum dma_transfer_direction direction, | 
| Alexandre Bounine | 185ecb5 | 2012-03-08 15:35:13 -0500 | [diff] [blame] | 2337 | 							 unsigned long dma_flags, | 
 | 2338 | 							 void *context) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2339 | { | 
| Vinod Koul | db8196d | 2011-10-13 22:34:23 +0530 | [diff] [blame] | 2340 | 	if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) | 
| Rabin Vincent | 00ac034 | 2011-01-25 11:18:20 +0100 | [diff] [blame] | 2341 | 		return NULL; | 
 | 2342 |  | 
| Rabin Vincent | cade1d3 | 2011-01-25 11:18:23 +0100 | [diff] [blame] | 2343 | 	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2344 | } | 
 | 2345 |  | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 2346 | static struct dma_async_tx_descriptor * | 
 | 2347 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | 
 | 2348 | 		     size_t buf_len, size_t period_len, | 
| Peter Ujfalusi | ec8b5e4 | 2012-09-14 15:05:47 +0300 | [diff] [blame] | 2349 | 		     enum dma_transfer_direction direction, unsigned long flags, | 
 | 2350 | 		     void *context) | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 2351 | { | 
 | 2352 | 	unsigned int periods = buf_len / period_len; | 
 | 2353 | 	struct dma_async_tx_descriptor *txd; | 
 | 2354 | 	struct scatterlist *sg; | 
 | 2355 | 	int i; | 
 | 2356 |  | 
| Robert Marklund | 79ca7ec | 2011-06-27 11:33:24 +0200 | [diff] [blame] | 2357 | 	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT); | 
 |  | 	if (!sg) | 
 |  | 		return NULL; | 
 |  |  | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 2358 | 	for (i = 0; i < periods; i++) { | 
 | 2359 | 		sg_dma_address(&sg[i]) = dma_addr; | 
 | 2360 | 		sg_dma_len(&sg[i]) = period_len; | 
 | 2361 | 		dma_addr += period_len; | 
 | 2362 | 	} | 
 | 2363 |  | 
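 |  | 	/* | 
 |  | 	 * Chain the extra terminator entry back to the first entry so the | 
 |  | 	 * list forms a ring, which d40_prep_sg detects as a cyclic transfer. | 
 |  | 	 */ | 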
 | 2364 | 	sg[periods].offset = 0; | 
| Lars-Peter Clausen | fdaf9c4 | 2012-04-25 20:50:52 +0200 | [diff] [blame] | 2365 | 	sg_dma_len(&sg[periods]) = 0; | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 2366 | 	sg[periods].page_link = | 
 | 2367 | 		((unsigned long)sg | 0x01) & ~0x02; | 
 | 2368 |  | 
 | 2369 | 	txd = d40_prep_sg(chan, sg, sg, periods, direction, | 
 | 2370 | 			  DMA_PREP_INTERRUPT); | 
 | 2371 |  | 
 | 2372 | 	kfree(sg); | 
 | 2373 |  | 
 | 2374 | 	return txd; | 
 | 2375 | } | 
 | 2376 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2377 | static enum dma_status d40_tx_status(struct dma_chan *chan, | 
 | 2378 | 				     dma_cookie_t cookie, | 
 | 2379 | 				     struct dma_tx_state *txstate) | 
 | 2380 | { | 
 | 2381 | 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 
| Russell King - ARM Linux | 96a2af4 | 2012-03-06 22:35:27 +0000 | [diff] [blame] | 2382 | 	enum dma_status ret; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2383 |  | 
| Jonas Aaberg | 0d0f6b8 | 2010-06-20 21:25:31 +0000 | [diff] [blame] | 2384 | 	if (d40c->phy_chan == NULL) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 2385 | 		chan_err(d40c, "Cannot read status of unallocated channel\n"); | 
| Jonas Aaberg | 0d0f6b8 | 2010-06-20 21:25:31 +0000 | [diff] [blame] | 2386 | 		return -EINVAL; | 
 | 2387 | 	} | 
 | 2388 |  | 
| Russell King - ARM Linux | 96a2af4 | 2012-03-06 22:35:27 +0000 | [diff] [blame] | 2389 | 	ret = dma_cookie_status(chan, cookie, txstate); | 
 | 2390 | 	if (ret != DMA_SUCCESS) | 
 | 2391 | 		dma_set_residue(txstate, stedma40_residue(chan)); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2392 |  | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 2393 | 	if (d40_is_paused(d40c)) | 
 | 2394 | 		ret = DMA_PAUSED; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2395 |  | 
 | 2396 | 	return ret; | 
 | 2397 | } | 
 | 2398 |  | 
 | 2399 | static void d40_issue_pending(struct dma_chan *chan) | 
 | 2400 | { | 
 | 2401 | 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 
 | 2402 | 	unsigned long flags; | 
 | 2403 |  | 
| Jonas Aaberg | 0d0f6b8 | 2010-06-20 21:25:31 +0000 | [diff] [blame] | 2404 | 	if (d40c->phy_chan == NULL) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 2405 | 		chan_err(d40c, "Channel is not allocated!\n"); | 
| Jonas Aaberg | 0d0f6b8 | 2010-06-20 21:25:31 +0000 | [diff] [blame] | 2406 | 		return; | 
 | 2407 | 	} | 
 | 2408 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2409 | 	spin_lock_irqsave(&d40c->lock, flags); | 
 | 2410 |  | 
| Per Forlin | a8f3067 | 2011-06-26 23:29:52 +0200 | [diff] [blame] | 2411 | 	list_splice_tail_init(&d40c->pending_queue, &d40c->queue); | 
 | 2412 |  | 
 | 2413 | 	/* Busy means that queued jobs are already being processed */ | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2414 | 	if (!d40c->busy) | 
 | 2415 | 		(void) d40_queue_start(d40c); | 
 | 2416 |  | 
 | 2417 | 	spin_unlock_irqrestore(&d40c->lock, flags); | 
 | 2418 | } | 
 | 2419 |  | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 2420 | static void d40_terminate_all(struct dma_chan *chan) | 
 | 2421 | { | 
 | 2422 | 	unsigned long flags; | 
 | 2423 | 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 
 | 2424 | 	int ret; | 
 | 2425 |  | 
 | 2426 | 	spin_lock_irqsave(&d40c->lock, flags); | 
 | 2427 |  | 
 | 2428 | 	pm_runtime_get_sync(d40c->base->dev); | 
 | 2429 | 	ret = d40_channel_execute_command(d40c, D40_DMA_STOP); | 
 | 2430 | 	if (ret) | 
 | 2431 | 		chan_err(d40c, "Failed to stop channel\n"); | 
 | 2432 |  | 
 | 2433 | 	d40_term_all(d40c); | 
 | 2434 | 	pm_runtime_mark_last_busy(d40c->base->dev); | 
 | 2435 | 	pm_runtime_put_autosuspend(d40c->base->dev); | 
 | 2436 | 	if (d40c->busy) { | 
 | 2437 | 		pm_runtime_mark_last_busy(d40c->base->dev); | 
 | 2438 | 		pm_runtime_put_autosuspend(d40c->base->dev); | 
 | 2439 | 	} | 
 | 2440 | 	d40c->busy = false; | 
 | 2441 |  | 
 | 2442 | 	spin_unlock_irqrestore(&d40c->lock, flags); | 
 | 2443 | } | 
 | 2444 |  | 
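 |  | /* | 
 |  |  * Translate a dma_slave_config bus width and maxburst pair into the | 
 |  |  * DMA40 half-channel settings (data width, packet size, flow control). | 
 |  |  */ | 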
| Rabin Vincent | 98ca528 | 2011-06-27 11:33:38 +0200 | [diff] [blame] | 2445 | static int | 
 | 2446 | dma40_config_to_halfchannel(struct d40_chan *d40c, | 
 | 2447 | 			    struct stedma40_half_channel_info *info, | 
 | 2448 | 			    enum dma_slave_buswidth width, | 
 | 2449 | 			    u32 maxburst) | 
 | 2450 | { | 
 | 2451 | 	enum stedma40_periph_data_width addr_width; | 
 | 2452 | 	int psize; | 
 | 2453 |  | 
 | 2454 | 	switch (width) { | 
 | 2455 | 	case DMA_SLAVE_BUSWIDTH_1_BYTE: | 
 | 2456 | 		addr_width = STEDMA40_BYTE_WIDTH; | 
 | 2457 | 		break; | 
 | 2458 | 	case DMA_SLAVE_BUSWIDTH_2_BYTES: | 
 | 2459 | 		addr_width = STEDMA40_HALFWORD_WIDTH; | 
 | 2460 | 		break; | 
 | 2461 | 	case DMA_SLAVE_BUSWIDTH_4_BYTES: | 
 | 2462 | 		addr_width = STEDMA40_WORD_WIDTH; | 
 | 2463 | 		break; | 
 | 2464 | 	case DMA_SLAVE_BUSWIDTH_8_BYTES: | 
 | 2465 | 		addr_width = STEDMA40_DOUBLEWORD_WIDTH; | 
 | 2466 | 		break; | 
 | 2467 | 	default: | 
 | 2468 | 		dev_err(d40c->base->dev, | 
 | 2469 | 			"illegal peripheral address width " | 
 | 2470 | 			"requested (%d)\n", | 
 | 2471 | 			width); | 
 | 2472 | 		return -EINVAL; | 
 | 2473 | 	} | 
 | 2474 |  | 
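 |  | 	/* | 
 |  | 	 * Pick the largest supported burst (packet) size that does not | 
 |  | 	 * exceed the requested maxburst. | 
 |  | 	 */ | 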
 | 2475 | 	if (chan_is_logical(d40c)) { | 
 | 2476 | 		if (maxburst >= 16) | 
 | 2477 | 			psize = STEDMA40_PSIZE_LOG_16; | 
 | 2478 | 		else if (maxburst >= 8) | 
 | 2479 | 			psize = STEDMA40_PSIZE_LOG_8; | 
 | 2480 | 		else if (maxburst >= 4) | 
 | 2481 | 			psize = STEDMA40_PSIZE_LOG_4; | 
 | 2482 | 		else | 
 | 2483 | 			psize = STEDMA40_PSIZE_LOG_1; | 
 | 2484 | 	} else { | 
 | 2485 | 		if (maxburst >= 16) | 
 | 2486 | 			psize = STEDMA40_PSIZE_PHY_16; | 
 | 2487 | 		else if (maxburst >= 8) | 
 | 2488 | 			psize = STEDMA40_PSIZE_PHY_8; | 
 | 2489 | 		else if (maxburst >= 4) | 
 | 2490 | 			psize = STEDMA40_PSIZE_PHY_4; | 
 | 2491 | 		else | 
 | 2492 | 			psize = STEDMA40_PSIZE_PHY_1; | 
 | 2493 | 	} | 
 | 2494 |  | 
 | 2495 | 	info->data_width = addr_width; | 
 | 2496 | 	info->psize = psize; | 
 | 2497 | 	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; | 
 | 2498 |  | 
 | 2499 | 	return 0; | 
 | 2500 | } | 
 | 2501 |  | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2502 | /* Runtime reconfiguration extension */ | 
| Rabin Vincent | 98ca528 | 2011-06-27 11:33:38 +0200 | [diff] [blame] | 2503 | static int d40_set_runtime_config(struct dma_chan *chan, | 
 | 2504 | 				  struct dma_slave_config *config) | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2505 | { | 
 | 2506 | 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 
 | 2507 | 	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; | 
| Rabin Vincent | 98ca528 | 2011-06-27 11:33:38 +0200 | [diff] [blame] | 2508 | 	enum dma_slave_buswidth src_addr_width, dst_addr_width; | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2509 | 	dma_addr_t config_addr; | 
| Rabin Vincent | 98ca528 | 2011-06-27 11:33:38 +0200 | [diff] [blame] | 2510 | 	u32 src_maxburst, dst_maxburst; | 
 | 2511 | 	int ret; | 
 | 2512 |  | 
 | 2513 | 	src_addr_width = config->src_addr_width; | 
 | 2514 | 	src_maxburst = config->src_maxburst; | 
 | 2515 | 	dst_addr_width = config->dst_addr_width; | 
 | 2516 | 	dst_maxburst = config->dst_maxburst; | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2517 |  | 
| Vinod Koul | db8196d | 2011-10-13 22:34:23 +0530 | [diff] [blame] | 2518 | 	if (config->direction == DMA_DEV_TO_MEM) { | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2519 | 		dma_addr_t dev_addr_rx = | 
 | 2520 | 			d40c->base->plat_data->dev_rx[cfg->src_dev_type]; | 
 | 2521 |  | 
 | 2522 | 		config_addr = config->src_addr; | 
 | 2523 | 		if (dev_addr_rx) | 
 | 2524 | 			dev_dbg(d40c->base->dev, | 
 | 2525 | 				"channel has a pre-wired RX address %08x " | 
 | 2526 | 				"overriding with %08x\n", | 
 | 2527 | 				dev_addr_rx, config_addr); | 
 | 2528 | 		if (cfg->dir != STEDMA40_PERIPH_TO_MEM) | 
 | 2529 | 			dev_dbg(d40c->base->dev, | 
 | 2530 | 				"channel was not configured for peripheral " | 
 | 2531 | 				"to memory transfer (%d) overriding\n", | 
 | 2532 | 				cfg->dir); | 
 | 2533 | 		cfg->dir = STEDMA40_PERIPH_TO_MEM; | 
 | 2534 |  | 
| Rabin Vincent | 98ca528 | 2011-06-27 11:33:38 +0200 | [diff] [blame] | 2535 | 		/* Configure the memory side */ | 
 | 2536 | 		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) | 
 | 2537 | 			dst_addr_width = src_addr_width; | 
 | 2538 | 		if (dst_maxburst == 0) | 
 | 2539 | 			dst_maxburst = src_maxburst; | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2540 |  | 
| Vinod Koul | db8196d | 2011-10-13 22:34:23 +0530 | [diff] [blame] | 2541 | 	} else if (config->direction == DMA_MEM_TO_DEV) { | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2542 | 		dma_addr_t dev_addr_tx = | 
 | 2543 | 			d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; | 
 | 2544 |  | 
 | 2545 | 		config_addr = config->dst_addr; | 
 | 2546 | 		if (dev_addr_tx) | 
 | 2547 | 			dev_dbg(d40c->base->dev, | 
 | 2548 | 				"channel has a pre-wired TX address %08x " | 
 | 2549 | 				"overriding with %08x\n", | 
 | 2550 | 				dev_addr_tx, config_addr); | 
 | 2551 | 		if (cfg->dir != STEDMA40_MEM_TO_PERIPH) | 
 | 2552 | 			dev_dbg(d40c->base->dev, | 
 | 2553 | 				"channel was not configured for memory " | 
 | 2554 | 				"to peripheral transfer (%d) overriding\n", | 
 | 2555 | 				cfg->dir); | 
 | 2556 | 		cfg->dir = STEDMA40_MEM_TO_PERIPH; | 
 | 2557 |  | 
| Rabin Vincent | 98ca528 | 2011-06-27 11:33:38 +0200 | [diff] [blame] | 2558 | 		/* Configure the memory side */ | 
 | 2559 | 		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) | 
 | 2560 | 			src_addr_width = dst_addr_width; | 
 | 2561 | 		if (src_maxburst == 0) | 
 | 2562 | 			src_maxburst = dst_maxburst; | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2563 | 	} else { | 
 | 2564 | 		dev_err(d40c->base->dev, | 
 | 2565 | 			"unrecognized channel direction %d\n", | 
 | 2566 | 			config->direction); | 
| Rabin Vincent | 98ca528 | 2011-06-27 11:33:38 +0200 | [diff] [blame] | 2567 | 		return -EINVAL; | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2568 | 	} | 
 | 2569 |  | 
| Rabin Vincent | 98ca528 | 2011-06-27 11:33:38 +0200 | [diff] [blame] | 2570 | 	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2571 | 		dev_err(d40c->base->dev, | 
| Rabin Vincent | 98ca528 | 2011-06-27 11:33:38 +0200 | [diff] [blame] | 2572 | 			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n", | 
 | 2573 | 			src_maxburst, | 
 | 2574 | 			src_addr_width, | 
 | 2575 | 			dst_maxburst, | 
 | 2576 | 			dst_addr_width); | 
 | 2577 | 		return -EINVAL; | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2578 | 	} | 
 | 2579 |  | 
| Rabin Vincent | 98ca528 | 2011-06-27 11:33:38 +0200 | [diff] [blame] | 2580 | 	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, | 
 | 2581 | 					  src_addr_width, | 
 | 2582 | 					  src_maxburst); | 
 | 2583 | 	if (ret) | 
 | 2584 | 		return ret; | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2585 |  | 
| Rabin Vincent | 98ca528 | 2011-06-27 11:33:38 +0200 | [diff] [blame] | 2586 | 	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, | 
 | 2587 | 					  dst_addr_width, | 
 | 2588 | 					  dst_maxburst); | 
 | 2589 | 	if (ret) | 
 | 2590 | 		return ret; | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2591 |  | 
| Per Forlin | a59670a | 2010-10-06 09:05:27 +0000 | [diff] [blame] | 2592 | 	/* Fill in register values */ | 
| Rabin Vincent | 724a857 | 2011-01-25 11:18:08 +0100 | [diff] [blame] | 2593 | 	if (chan_is_logical(d40c)) | 
| Per Forlin | a59670a | 2010-10-06 09:05:27 +0000 | [diff] [blame] | 2594 | 		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | 
 | 2595 | 	else | 
 | 2596 | 		d40_phy_cfg(cfg, &d40c->src_def_cfg, | 
 | 2597 | 			    &d40c->dst_def_cfg, false); | 
 | 2598 |  | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2599 | 	/* These settings will take precedence later */ | 
 | 2600 | 	d40c->runtime_addr = config_addr; | 
 | 2601 | 	d40c->runtime_direction = config->direction; | 
 | 2602 | 	dev_dbg(d40c->base->dev, | 
| Rabin Vincent | 98ca528 | 2011-06-27 11:33:38 +0200 | [diff] [blame] | 2603 | 		"configured channel %s for %s, data width %d/%d, " | 
 | 2604 | 		"maxburst %d/%d elements, LE, no flow control\n", | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2605 | 		dma_chan_name(chan), | 
| Vinod Koul | db8196d | 2011-10-13 22:34:23 +0530 | [diff] [blame] | 2606 | 		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", | 
| Rabin Vincent | 98ca528 | 2011-06-27 11:33:38 +0200 | [diff] [blame] | 2607 | 		src_addr_width, dst_addr_width, | 
 | 2608 | 		src_maxburst, dst_maxburst); | 
 | 2609 |  | 
 | 2610 | 	return 0; | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2611 | } | 
 | 2612 |  | 
| Linus Walleij | 0582763 | 2010-05-17 16:30:42 -0700 | [diff] [blame] | 2613 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 
 | 2614 | 		       unsigned long arg) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2615 | { | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2616 | 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 
 | 2617 |  | 
| Jonas Aaberg | 0d0f6b8 | 2010-06-20 21:25:31 +0000 | [diff] [blame] | 2618 | 	if (d40c->phy_chan == NULL) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 2619 | 		chan_err(d40c, "Channel is not allocated!\n"); | 
| Jonas Aaberg | 0d0f6b8 | 2010-06-20 21:25:31 +0000 | [diff] [blame] | 2620 | 		return -EINVAL; | 
 | 2621 | 	} | 
 | 2622 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2623 | 	switch (cmd) { | 
 | 2624 | 	case DMA_TERMINATE_ALL: | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 2625 | 		d40_terminate_all(chan); | 
 | 2626 | 		return 0; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2627 | 	case DMA_PAUSE: | 
| Rabin Vincent | 86eb5fb | 2011-01-25 11:18:34 +0100 | [diff] [blame] | 2628 | 		return d40_pause(d40c); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2629 | 	case DMA_RESUME: | 
| Rabin Vincent | 86eb5fb | 2011-01-25 11:18:34 +0100 | [diff] [blame] | 2630 | 		return d40_resume(d40c); | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2631 | 	case DMA_SLAVE_CONFIG: | 
| Rabin Vincent | 98ca528 | 2011-06-27 11:33:38 +0200 | [diff] [blame] | 2632 | 		return d40_set_runtime_config(chan, | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2633 | 			(struct dma_slave_config *) arg); | 
| Linus Walleij | 95e1400 | 2010-08-04 13:37:45 +0200 | [diff] [blame] | 2634 | 	default: | 
 | 2635 | 		break; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2636 | 	} | 
 | 2637 |  | 
 | 2638 | 	/* Other commands are unimplemented */ | 
 | 2639 | 	return -ENXIO; | 
 | 2640 | } | 
 | 2641 |  | 
 | 2642 | /* Initialization functions */ | 
 | 2643 |  | 
 | 2644 | static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | 
 | 2645 | 				 struct d40_chan *chans, int offset, | 
 | 2646 | 				 int num_chans) | 
 | 2647 | { | 
 | 2648 | 	int i = 0; | 
 | 2649 | 	struct d40_chan *d40c; | 
 | 2650 |  | 
 | 2651 | 	INIT_LIST_HEAD(&dma->channels); | 
 | 2652 |  | 
 | 2653 | 	for (i = offset; i < offset + num_chans; i++) { | 
 | 2654 | 		d40c = &chans[i]; | 
 | 2655 | 		d40c->base = base; | 
 | 2656 | 		d40c->chan.device = dma; | 
 | 2657 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2658 | 		spin_lock_init(&d40c->lock); | 
 | 2659 |  | 
 | 2660 | 		d40c->log_num = D40_PHY_CHAN; | 
 | 2661 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2662 | 		INIT_LIST_HEAD(&d40c->active); | 
 | 2663 | 		INIT_LIST_HEAD(&d40c->queue); | 
| Per Forlin | a8f3067 | 2011-06-26 23:29:52 +0200 | [diff] [blame] | 2664 | 		INIT_LIST_HEAD(&d40c->pending_queue); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2665 | 		INIT_LIST_HEAD(&d40c->client); | 
| Per Forlin | 82babbb36 | 2011-08-29 13:33:35 +0200 | [diff] [blame] | 2666 | 		INIT_LIST_HEAD(&d40c->prepare_queue); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2667 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2668 | 		tasklet_init(&d40c->tasklet, dma_tasklet, | 
 | 2669 | 			     (unsigned long) d40c); | 
 | 2670 |  | 
 | 2671 | 		list_add_tail(&d40c->chan.device_node, | 
 | 2672 | 			      &dma->channels); | 
 | 2673 | 	} | 
 | 2674 | } | 
 | 2675 |  | 
| Rabin Vincent | 7ad74a7 | 2011-01-25 11:18:33 +0100 | [diff] [blame] | 2676 | static void d40_ops_init(struct d40_base *base, struct dma_device *dev) | 
 | 2677 | { | 
 | 2678 | 	if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) | 
 | 2679 | 		dev->device_prep_slave_sg = d40_prep_slave_sg; | 
 | 2680 |  | 
 | 2681 | 	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { | 
 | 2682 | 		dev->device_prep_dma_memcpy = d40_prep_memcpy; | 
 | 2683 |  | 
 | 2684 | 		/* | 
 | 2685 | 		 * This controller can only access addresses at even | 
 | 2686 | 		 * 32-bit boundaries, i.e. aligned to 2^2 bytes. | 
 | 2687 | 		 */ | 
 | 2688 | 		dev->copy_align = 2; | 
 | 2689 | 	} | 
 | 2690 |  | 
 | 2691 | 	if (dma_has_cap(DMA_SG, dev->cap_mask)) | 
 | 2692 | 		dev->device_prep_dma_sg = d40_prep_memcpy_sg; | 
 | 2693 |  | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 2694 | 	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) | 
 | 2695 | 		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; | 
 | 2696 |  | 
| Rabin Vincent | 7ad74a7 | 2011-01-25 11:18:33 +0100 | [diff] [blame] | 2697 | 	dev->device_alloc_chan_resources = d40_alloc_chan_resources; | 
 | 2698 | 	dev->device_free_chan_resources = d40_free_chan_resources; | 
 | 2699 | 	dev->device_issue_pending = d40_issue_pending; | 
 | 2700 | 	dev->device_tx_status = d40_tx_status; | 
 | 2701 | 	dev->device_control = d40_control; | 
 | 2702 | 	dev->dev = base->dev; | 
 | 2703 | } | 
 | 2704 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2705 | static int __init d40_dmaengine_init(struct d40_base *base, | 
 | 2706 | 				     int num_reserved_chans) | 
 | 2707 | { | 
 | 2708 | 	int err; | 
 | 2709 |  | 
 | 2710 | 	d40_chan_init(base, &base->dma_slave, base->log_chans, | 
 | 2711 | 		      0, base->num_log_chans); | 
 | 2712 |  | 
 | 2713 | 	dma_cap_zero(base->dma_slave.cap_mask); | 
 | 2714 | 	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 2715 | 	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2716 |  | 
| Rabin Vincent | 7ad74a7 | 2011-01-25 11:18:33 +0100 | [diff] [blame] | 2717 | 	d40_ops_init(base, &base->dma_slave); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2718 |  | 
 | 2719 | 	err = dma_async_device_register(&base->dma_slave); | 
 | 2720 |  | 
 | 2721 | 	if (err) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 2722 | 		d40_err(base->dev, "Failed to register slave channels\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2723 | 		goto failure1; | 
 | 2724 | 	} | 
 | 2725 |  | 
 | 2726 | 	d40_chan_init(base, &base->dma_memcpy, base->log_chans, | 
 | 2727 | 		      base->num_log_chans, base->plat_data->memcpy_len); | 
 | 2728 |  | 
 | 2729 | 	dma_cap_zero(base->dma_memcpy.cap_mask); | 
 | 2730 | 	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); | 
| Rabin Vincent | 7ad74a7 | 2011-01-25 11:18:33 +0100 | [diff] [blame] | 2731 | 	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2732 |  | 
| Rabin Vincent | 7ad74a7 | 2011-01-25 11:18:33 +0100 | [diff] [blame] | 2733 | 	d40_ops_init(base, &base->dma_memcpy); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2734 |  | 
 | 2735 | 	err = dma_async_device_register(&base->dma_memcpy); | 
 | 2736 |  | 
 | 2737 | 	if (err) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 2738 | 		d40_err(base->dev, | 
 | 2739 | 			"Failed to register memcpy only channels\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2740 | 		goto failure2; | 
 | 2741 | 	} | 
 | 2742 |  | 
 | 2743 | 	d40_chan_init(base, &base->dma_both, base->phy_chans, | 
 | 2744 | 		      0, num_reserved_chans); | 
 | 2745 |  | 
 | 2746 | 	dma_cap_zero(base->dma_both.cap_mask); | 
 | 2747 | 	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); | 
 | 2748 | 	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); | 
| Rabin Vincent | 7ad74a7 | 2011-01-25 11:18:33 +0100 | [diff] [blame] | 2749 | 	dma_cap_set(DMA_SG, base->dma_both.cap_mask); | 
| Rabin Vincent | 0c842b5 | 2011-01-25 11:18:35 +0100 | [diff] [blame] | 2750 | 	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2751 |  | 
| Rabin Vincent | 7ad74a7 | 2011-01-25 11:18:33 +0100 | [diff] [blame] | 2752 | 	d40_ops_init(base, &base->dma_both); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2753 | 	err = dma_async_device_register(&base->dma_both); | 
 | 2754 |  | 
 | 2755 | 	if (err) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 2756 | 		d40_err(base->dev, | 
 | 2757 | 			"Failed to register logical and physical capable channels\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2758 | 		goto failure3; | 
 | 2759 | 	} | 
 | 2760 | 	return 0; | 
 | 2761 | failure3: | 
 | 2762 | 	dma_async_device_unregister(&base->dma_memcpy); | 
 | 2763 | failure2: | 
 | 2764 | 	dma_async_device_unregister(&base->dma_slave); | 
 | 2765 | failure1: | 
 | 2766 | 	return err; | 
 | 2767 | } | 
 | 2768 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 2769 | /* Suspend resume functionality */ | 
 | 2770 | #ifdef CONFIG_PM | 
 | 2771 | static int dma40_pm_suspend(struct device *dev) | 
 | 2772 | { | 
| Narayanan G | 28c7a19 | 2011-11-22 13:56:55 +0530 | [diff] [blame] | 2773 | 	struct platform_device *pdev = to_platform_device(dev); | 
 | 2774 | 	struct d40_base *base = platform_get_drvdata(pdev); | 
 | 2775 | 	int ret = 0; | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 2776 | 	if (!pm_runtime_suspended(dev)) | 
 | 2777 | 		return -EBUSY; | 
 | 2778 |  | 
| Narayanan G | 28c7a19 | 2011-11-22 13:56:55 +0530 | [diff] [blame] | 2779 | 	if (base->lcpa_regulator) | 
 | 2780 | 		ret = regulator_disable(base->lcpa_regulator); | 
 | 2781 | 	return ret; | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 2782 | } | 
 | 2783 |  | 
 | 2784 | static int dma40_runtime_suspend(struct device *dev) | 
 | 2785 | { | 
 | 2786 | 	struct platform_device *pdev = to_platform_device(dev); | 
 | 2787 | 	struct d40_base *base = platform_get_drvdata(pdev); | 
 | 2788 |  | 
 | 2789 | 	d40_save_restore_registers(base, true); | 
 | 2790 |  | 
 | 2791 | 	/* Don't disable/enable clocks for v1 due to HW bugs */ | 
 | 2792 | 	if (base->rev != 1) | 
 | 2793 | 		writel_relaxed(base->gcc_pwr_off_mask, | 
 | 2794 | 			       base->virtbase + D40_DREG_GCC); | 
 | 2795 |  | 
 | 2796 | 	return 0; | 
 | 2797 | } | 
 | 2798 |  | 
 | 2799 | static int dma40_runtime_resume(struct device *dev) | 
 | 2800 | { | 
 | 2801 | 	struct platform_device *pdev = to_platform_device(dev); | 
 | 2802 | 	struct d40_base *base = platform_get_drvdata(pdev); | 
 | 2803 |  | 
 | 2804 | 	if (base->initialized) | 
 | 2805 | 		d40_save_restore_registers(base, false); | 
 | 2806 |  | 
 | 2807 | 	writel_relaxed(D40_DREG_GCC_ENABLE_ALL, | 
 | 2808 | 		       base->virtbase + D40_DREG_GCC); | 
 | 2809 | 	return 0; | 
 | 2810 | } | 
 | 2811 |  | 
| Narayanan G | 28c7a19 | 2011-11-22 13:56:55 +0530 | [diff] [blame] | 2812 | static int dma40_resume(struct device *dev) | 
 | 2813 | { | 
 | 2814 | 	struct platform_device *pdev = to_platform_device(dev); | 
 | 2815 | 	struct d40_base *base = platform_get_drvdata(pdev); | 
 | 2816 | 	int ret = 0; | 
 | 2817 |  | 
 | 2818 | 	if (base->lcpa_regulator) | 
 | 2819 | 		ret = regulator_enable(base->lcpa_regulator); | 
 | 2820 |  | 
 | 2821 | 	return ret; | 
 | 2822 | } | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 2823 |  | 
 | 2824 | static const struct dev_pm_ops dma40_pm_ops = { | 
 | 2825 | 	.suspend		= dma40_pm_suspend, | 
 | 2826 | 	.runtime_suspend	= dma40_runtime_suspend, | 
 | 2827 | 	.runtime_resume		= dma40_runtime_resume, | 
| Narayanan G | 28c7a19 | 2011-11-22 13:56:55 +0530 | [diff] [blame] | 2828 | 	.resume			= dma40_resume, | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 2829 | }; | 
 | 2830 | #define DMA40_PM_OPS	(&dma40_pm_ops) | 
 | 2831 | #else | 
 | 2832 | #define DMA40_PM_OPS	NULL | 
 | 2833 | #endif | 
 | 2834 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2835 | /* Initialization functions. */ | 
 | 2836 |  | 
 | 2837 | static int __init d40_phy_res_init(struct d40_base *base) | 
 | 2838 | { | 
 | 2839 | 	int i; | 
 | 2840 | 	int num_phy_chans_avail = 0; | 
 | 2841 | 	u32 val[2]; | 
 | 2842 | 	int odd_even_bit = -2; | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 2843 | 	int gcc = D40_DREG_GCC_ENA; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2844 |  | 
 | 2845 | 	val[0] = readl(base->virtbase + D40_DREG_PRSME); | 
 | 2846 | 	val[1] = readl(base->virtbase + D40_DREG_PRSMO); | 
 | 2847 |  | 
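 |  | 	/* | 
 |  | 	 * The secure/standard mode of each channel is read two bits at a | 
 |  | 	 * time: even-numbered channels from PRSME, odd-numbered from PRSMO. | 
 |  | 	 */ | 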
 | 2848 | 	for (i = 0; i < base->num_phy_chans; i++) { | 
 | 2849 | 		base->phy_res[i].num = i; | 
 | 2850 | 		odd_even_bit += 2 * ((i % 2) == 0); | 
 | 2851 | 		if (((val[i % 2] >> odd_even_bit) & 3) == 1) { | 
 | 2852 | 			/* Mark security only channels as occupied */ | 
 | 2853 | 			base->phy_res[i].allocated_src = D40_ALLOC_PHY; | 
 | 2854 | 			base->phy_res[i].allocated_dst = D40_ALLOC_PHY; | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 2855 | 			base->phy_res[i].reserved = true; | 
 | 2856 | 			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), | 
 | 2857 | 						       D40_DREG_GCC_SRC); | 
 | 2858 | 			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), | 
 | 2859 | 						       D40_DREG_GCC_DST); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2862 | 		} else { | 
 | 2863 | 			base->phy_res[i].allocated_src = D40_ALLOC_FREE; | 
 | 2864 | 			base->phy_res[i].allocated_dst = D40_ALLOC_FREE; | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 2865 | 			base->phy_res[i].reserved = false; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2866 | 			num_phy_chans_avail++; | 
 | 2867 | 		} | 
 | 2868 | 		spin_lock_init(&base->phy_res[i].lock); | 
 | 2869 | 	} | 
| Jonas Aaberg | 6b7acd8 | 2010-06-20 21:26:59 +0000 | [diff] [blame] | 2870 |  | 
 | 2871 | 	/* Mark disabled channels as occupied */ | 
 | 2872 | 	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { | 
| Rabin Vincent | f57b407 | 2010-10-06 08:20:35 +0000 | [diff] [blame] | 2873 | 		int chan = base->plat_data->disabled_channels[i]; | 
 | 2874 |  | 
 | 2875 | 		base->phy_res[chan].allocated_src = D40_ALLOC_PHY; | 
 | 2876 | 		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 2877 | 		base->phy_res[chan].reserved = true; | 
 | 2878 | 		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), | 
 | 2879 | 					       D40_DREG_GCC_SRC); | 
 | 2880 | 		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), | 
 | 2881 | 					       D40_DREG_GCC_DST); | 
| Rabin Vincent | f57b407 | 2010-10-06 08:20:35 +0000 | [diff] [blame] | 2882 | 		num_phy_chans_avail--; | 
| Jonas Aaberg | 6b7acd8 | 2010-06-20 21:26:59 +0000 | [diff] [blame] | 2883 | 	} | 
 | 2884 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2885 | 	dev_info(base->dev, "%d of %d physical DMA channels available\n", | 
 | 2886 | 		 num_phy_chans_avail, base->num_phy_chans); | 
 | 2887 |  | 
 | 2888 | 	/* Verify settings extended vs standard */ | 
 | 2889 | 	val[0] = readl(base->virtbase + D40_DREG_PRTYP); | 
 | 2890 |  | 
 | 2891 | 	for (i = 0; i < base->num_phy_chans; i++) { | 
 | 2892 |  | 
 | 2893 | 		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && | 
 | 2894 | 		    (val[0] & 0x3) != 1) | 
 | 2895 | 			dev_info(base->dev, | 
 | 2896 | 				 "[%s] INFO: channel %d is misconfigured (%d)\n", | 
 | 2897 | 				 __func__, i, val[0] & 0x3); | 
 | 2898 |  | 
 | 2899 | 		val[0] = val[0] >> 2; | 
 | 2900 | 	} | 
 | 2901 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 2902 | 	/* | 
 | 2903 | 	 * To keep things simple, enable all clocks initially. | 
 | 2904 | 	 * The clocks are managed later, after channel allocation. | 
 | 2905 | 	 * The clocks for the event lines on which reserved channels exist | 
 | 2906 | 	 * are not managed here. | 
 | 2907 | 	 */ | 
 | 2908 | 	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); | 
 | 2909 | 	base->gcc_pwr_off_mask = gcc; | 
 | 2910 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2911 | 	return num_phy_chans_avail; | 
 | 2912 | } | 
 | 2913 |  | 
 | 2914 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | 
 | 2915 | { | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2916 | 	struct stedma40_platform_data *plat_data; | 
 | 2917 | 	struct clk *clk = NULL; | 
 | 2918 | 	void __iomem *virtbase = NULL; | 
 | 2919 | 	struct resource *res = NULL; | 
 | 2920 | 	struct d40_base *base = NULL; | 
 | 2921 | 	int num_log_chans = 0; | 
 | 2922 | 	int num_phy_chans; | 
| Ulf Hansson | b707c65 | 2012-08-23 13:41:58 +0200 | [diff] [blame] | 2923 | 	int clk_ret = -EINVAL; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2924 | 	int i; | 
| Linus Walleij | f4b8976 | 2011-06-27 11:33:46 +0200 | [diff] [blame] | 2925 | 	u32 pid; | 
 | 2926 | 	u32 cid; | 
 | 2927 | 	u8 rev; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2928 |  | 
 | 2929 | 	clk = clk_get(&pdev->dev, NULL); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2930 | 	if (IS_ERR(clk)) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 2931 | 		d40_err(&pdev->dev, "No matching clock found\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2932 | 		goto failure; | 
 | 2933 | 	} | 
 | 2934 |  | 
| Ulf Hansson | b707c65 | 2012-08-23 13:41:58 +0200 | [diff] [blame] | 2935 | 	clk_ret = clk_prepare_enable(clk); | 
 | 2936 | 	if (clk_ret) { | 
 | 2937 | 		d40_err(&pdev->dev, "Failed to prepare/enable clock\n"); | 
 | 2938 | 		goto failure; | 
 | 2939 | 	} | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2940 |  | 
 | 2941 | 	/* Get IO for DMAC base address */ | 
 | 2942 | 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); | 
 | 2943 | 	if (!res) | 
 | 2944 | 		goto failure; | 
 | 2945 |  | 
 | 2946 | 	if (request_mem_region(res->start, resource_size(res), | 
 | 2947 | 			       D40_NAME " I/O base") == NULL) | 
 | 2948 | 		goto failure; | 
 | 2949 |  | 
 | 2950 | 	virtbase = ioremap(res->start, resource_size(res)); | 
 | 2951 | 	if (!virtbase) | 
 | 2952 | 		goto failure; | 
 | 2953 |  | 
| Linus Walleij | f4b8976 | 2011-06-27 11:33:46 +0200 | [diff] [blame] | 2954 | 	/* This is just a regular AMBA PrimeCell ID actually */ | 
 | 2955 | 	for (pid = 0, i = 0; i < 4; i++) | 
 | 2956 | 		pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i) | 
 | 2957 | 			& 255) << (i * 8); | 
 | 2958 | 	for (cid = 0, i = 0; i < 4; i++) | 
 | 2959 | 		cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i) | 
 | 2960 | 			& 255) << (i * 8); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2961 |  | 
| Linus Walleij | f4b8976 | 2011-06-27 11:33:46 +0200 | [diff] [blame] | 2962 | 	if (cid != AMBA_CID) { | 
 | 2963 | 		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2964 | 		goto failure; | 
 | 2965 | 	} | 
| Linus Walleij | f4b8976 | 2011-06-27 11:33:46 +0200 | [diff] [blame] | 2966 | 	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { | 
 | 2967 | 		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", | 
 | 2968 | 			AMBA_MANF_BITS(pid), | 
 | 2969 | 			AMBA_VENDOR_ST); | 
 | 2970 | 		goto failure; | 
 | 2971 | 	} | 
 | 2972 | 	/* | 
 | 2973 | 	 * HW revision: | 
 | 2974 | 	 * DB8500ed has revision 0 | 
 | 2975 | 	 * ? has revision 1 | 
 | 2976 | 	 * DB8500v1 has revision 2 | 
 | 2977 | 	 * DB8500v2 has revision 3 | 
 | 2978 | 	 */ | 
 | 2979 | 	rev = AMBA_REV_BITS(pid); | 
| Jonas Aaberg | 3ae0267 | 2010-08-09 12:08:18 +0000 | [diff] [blame] | 2980 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2981 | 	/* The number of physical channels on this HW */ | 
 | 2982 | 	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | 
 | 2983 |  | 
 | 2984 | 	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", | 
| Jonas Aaberg | 3ae0267 | 2010-08-09 12:08:18 +0000 | [diff] [blame] | 2985 | 		 rev, res->start); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2986 |  | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 2987 | 	if (rev < 2) { | 
 | 2988 | 		d40_err(&pdev->dev, "hardware revision: %d is not supported", | 
 | 2989 | 			rev); | 
 | 2990 | 		goto failure; | 
 | 2991 | 	} | 
 | 2992 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2993 | 	plat_data = pdev->dev.platform_data; | 
 | 2994 |  | 
 | 2995 | 	/* Count the number of logical channels in use */ | 
 | 2996 | 	for (i = 0; i < plat_data->dev_len; i++) | 
 | 2997 | 		if (plat_data->dev_rx[i] != 0) | 
 | 2998 | 			num_log_chans++; | 
 | 2999 |  | 
 | 3000 | 	for (i = 0; i < plat_data->dev_len; i++) | 
 | 3001 | 		if (plat_data->dev_tx[i] != 0) | 
 | 3002 | 			num_log_chans++; | 
 | 3003 |  | 
 | 3004 | 	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + | 
 | 3005 | 		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) * | 
 | 3006 | 		       sizeof(struct d40_chan), GFP_KERNEL); | 
 | 3007 |  | 
 | 3008 | 	if (base == NULL) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 3009 | 		d40_err(&pdev->dev, "Out of memory\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3010 | 		goto failure; | 
 | 3011 | 	} | 
 | 3012 |  | 
| Jonas Aaberg | 3ae0267 | 2010-08-09 12:08:18 +0000 | [diff] [blame] | 3013 | 	base->rev = rev; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3014 | 	base->clk = clk; | 
 | 3015 | 	base->num_phy_chans = num_phy_chans; | 
 | 3016 | 	base->num_log_chans = num_log_chans; | 
 | 3017 | 	base->phy_start = res->start; | 
 | 3018 | 	base->phy_size = resource_size(res); | 
 | 3019 | 	base->virtbase = virtbase; | 
 | 3020 | 	base->plat_data = plat_data; | 
 | 3021 | 	base->dev = &pdev->dev; | 
 | 3022 | 	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); | 
 | 3023 | 	base->log_chans = &base->phy_chans[num_phy_chans]; | 
 | 3024 |  | 
 | 3025 | 	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), | 
 | 3026 | 				GFP_KERNEL); | 
 | 3027 | 	if (!base->phy_res) | 
 | 3028 | 		goto failure; | 
 | 3029 |  | 
 | 3030 | 	base->lookup_phy_chans = kzalloc(num_phy_chans * | 
 | 3031 | 					 sizeof(struct d40_chan *), | 
 | 3032 | 					 GFP_KERNEL); | 
 | 3033 | 	if (!base->lookup_phy_chans) | 
 | 3034 | 		goto failure; | 
 | 3035 |  | 
 | 3036 | 	if (num_log_chans + plat_data->memcpy_len) { | 
 | 3037 | 		/* | 
 | 3038 | 		 * The max number of logical channels is the number of event | 
 | 3039 | 		 * lines for all src and dst devices. | 
 | 3040 | 		 */ | 
 | 3041 | 		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 * | 
 | 3042 | 						 sizeof(struct d40_chan *), | 
 | 3043 | 						 GFP_KERNEL); | 
 | 3044 | 		if (!base->lookup_log_chans) | 
 | 3045 | 			goto failure; | 
 | 3046 | 	} | 
| Jonas Aaberg | 698e473 | 2010-08-09 12:08:56 +0000 | [diff] [blame] | 3047 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 3048 | 	base->reg_val_backup_chan = kmalloc(base->num_phy_chans * | 
 | 3049 | 					    sizeof(d40_backup_regs_chan), | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3050 | 					    GFP_KERNEL); | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 3051 | 	if (!base->reg_val_backup_chan) | 
 | 3052 | 		goto failure; | 
 | 3053 |  | 
 | 3054 | 	base->lcla_pool.alloc_map = | 
 | 3055 | 		kzalloc(num_phy_chans * sizeof(struct d40_desc *) | 
 | 3056 | 			* D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3057 | 	if (!base->lcla_pool.alloc_map) | 
 | 3058 | 		goto failure; | 
 | 3059 |  | 
| Jonas Aaberg | c675b1b | 2010-06-20 21:25:08 +0000 | [diff] [blame] | 3060 | 	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), | 
 | 3061 | 					    0, SLAB_HWCACHE_ALIGN, | 
 | 3062 | 					    NULL); | 
 | 3063 | 	if (base->desc_slab == NULL) | 
 | 3064 | 		goto failure; | 
 | 3065 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3066 | 	return base; | 
 | 3067 |  | 
 | 3068 | failure: | 
| Ulf Hansson | b707c65 | 2012-08-23 13:41:58 +0200 | [diff] [blame] | 3069 | 	if (!clk_ret) | 
 | 3070 | 		clk_disable_unprepare(clk); | 
 | 3071 | 	if (!IS_ERR(clk)) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3072 | 		clk_put(clk); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3073 | 	if (virtbase) | 
 | 3074 | 		iounmap(virtbase); | 
 | 3075 | 	if (res) | 
 | 3076 | 		release_mem_region(res->start, | 
 | 3077 | 				   resource_size(res)); | 
 | 3080 |  | 
 | 3081 | 	if (base) { | 
 | 3082 | 		kfree(base->lcla_pool.alloc_map); | 
| Narayanan G | 1bdae6f | 2012-02-09 12:41:37 +0530 | [diff] [blame] | 3083 | 		kfree(base->reg_val_backup_chan); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3084 | 		kfree(base->lookup_log_chans); | 
 | 3085 | 		kfree(base->lookup_phy_chans); | 
 | 3086 | 		kfree(base->phy_res); | 
 | 3087 | 		kfree(base); | 
 | 3088 | 	} | 
 | 3089 |  | 
 | 3090 | 	return NULL; | 
 | 3091 | } | 
 | 3092 |  | 
 | 3093 | static void __init d40_hw_init(struct d40_base *base) | 
 | 3094 | { | 
 | 3095 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 3096 | 	static struct d40_reg_val dma_init_reg[] = { | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3097 | 		/* Clock every part of the DMA block from start */ | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 3098 | 		{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL}, | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3099 |  | 
 | 3100 | 		/* Interrupts on all logical channels */ | 
 | 3101 | 		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, | 
 | 3102 | 		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, | 
 | 3103 | 		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, | 
 | 3104 | 		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, | 
 | 3105 | 		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, | 
 | 3106 | 		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, | 
 | 3107 | 		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, | 
 | 3108 | 		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, | 
 | 3109 | 		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, | 
 | 3110 | 		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, | 
 | 3111 | 		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, | 
 | 3112 | 		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} | 
 | 3113 | 	}; | 
 | 3114 | 	int i; | 
 | 3115 | 	u32 prmseo[2] = {0, 0}; | 
 | 3116 | 	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; | 
 | 3117 | 	u32 pcmis = 0; | 
 | 3118 | 	u32 pcicr = 0; | 
 | 3119 |  | 
 | 3120 | 	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++) | 
 | 3121 | 		writel(dma_init_reg[i].val, | 
 | 3122 | 		       base->virtbase + dma_init_reg[i].reg); | 
 | 3123 |  | 
 | 3124 | 	/* Configure all our dma channels to default settings */ | 
 | 3125 | 	for (i = 0; i < base->num_phy_chans; i++) { | 
 | 3126 |  | 
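		/*
		 * Channels are walked from the highest number down
		 * (base->num_phy_chans - i - 1), so after all the left shifts
		 * the lowest-numbered channels end up in the least significant
		 * bit positions of the accumulators written out below.
		 */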
 | 3127 | 		activeo[i % 2] = activeo[i % 2] << 2; | 
 | 3128 |  | 
 | 3129 | 		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src | 
 | 3130 | 		    == D40_ALLOC_PHY) { | 
 | 3131 | 			activeo[i % 2] |= 3; | 
 | 3132 | 			continue; | 
 | 3133 | 		} | 
 | 3134 |  | 
 | 3135 | 		/* Enable interrupt # */ | 
 | 3136 | 		pcmis = (pcmis << 1) | 1; | 
 | 3137 |  | 
 | 3138 | 		/* Clear interrupt # */ | 
 | 3139 | 		pcicr = (pcicr << 1) | 1; | 
 | 3140 |  | 
 | 3141 | 		/* Set channel to physical mode */ | 
 | 3142 | 		prmseo[i % 2] = prmseo[i % 2] << 2; | 
 | 3143 | 		prmseo[i % 2] |= 1; | 
 | 3144 |  | 
 | 3145 | 	} | 
 | 3146 |  | 
 | 3147 | 	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); | 
 | 3148 | 	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); | 
 | 3149 | 	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); | 
 | 3150 | 	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); | 
 | 3151 |  | 
 | 3152 | 	/* Write which interrupt to enable */ | 
 | 3153 | 	writel(pcmis, base->virtbase + D40_DREG_PCMIS); | 
 | 3154 |  | 
 | 3155 | 	/* Write which interrupt to clear */ | 
 | 3156 | 	writel(pcicr, base->virtbase + D40_DREG_PCICR); | 
 | 3157 |  | 
 | 3158 | } | 
 | 3159 |  | 
| Linus Walleij | 508849a | 2010-06-20 21:26:07 +0000 | [diff] [blame] | 3160 | static int __init d40_lcla_allocate(struct d40_base *base) | 
 | 3161 | { | 
| Rabin Vincent | 026cbc4 | 2011-01-25 11:18:14 +0100 | [diff] [blame] | 3162 | 	struct d40_lcla_pool *pool = &base->lcla_pool; | 
| Linus Walleij | 508849a | 2010-06-20 21:26:07 +0000 | [diff] [blame] | 3163 | 	unsigned long *page_list; | 
 | 3164 | 	int i, j; | 
 | 3165 | 	int ret = 0; | 
 | 3166 |  | 
 | 3167 | 	/* | 
 | 3168 | 	 * This is somewhat ugly. We need 8192 bytes aligned to an 18-bit | 
 | 3169 | 	 * (256 KiB) boundary. To fulfil this hardware requirement without | 
 | 3170 | 	 * wasting 256 KiB, we allocate pages until we get an aligned one. | 
 | 3171 | 	 */ | 
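	/*
	 * Blocks that come back unaligned are parked on page_list rather than
	 * freed straight away, presumably so the page allocator cannot hand
	 * the very same block back on the next attempt; they are all released
	 * in one pass once an aligned block is found or the attempt budget
	 * (MAX_LCLA_ALLOC_ATTEMPTS) runs out.
	 */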
 | 3172 | 	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, | 
 | 3173 | 			    GFP_KERNEL); | 
 | 3174 |  | 
 | 3175 | 	if (!page_list) { | 
 | 3176 | 		ret = -ENOMEM; | 
 | 3177 | 		goto failure; | 
 | 3178 | 	} | 
 | 3179 |  | 
 | 3180 | 	/* Calculate how many pages are required */ | 
 | 3181 | 	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; | 
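	/*
	 * Note that lcla_pool.pages is used as the "order" argument to
	 * __get_free_pages()/free_pages() below, so each attempt actually
	 * allocates 2^pages pages.
	 */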
 | 3182 |  | 
 | 3183 | 	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) { | 
 | 3184 | 		page_list[i] = __get_free_pages(GFP_KERNEL, | 
 | 3185 | 						base->lcla_pool.pages); | 
 | 3186 | 		if (!page_list[i]) { | 
 | 3187 |  | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 3188 | 			d40_err(base->dev, "Failed to allocate %d pages.\n", | 
 | 3189 | 				base->lcla_pool.pages); | 
| Linus Walleij | 508849a | 2010-06-20 21:26:07 +0000 | [diff] [blame] | 3190 |  | 
 | 3191 | 			for (j = 0; j < i; j++) | 
 | 3192 | 				free_pages(page_list[j], base->lcla_pool.pages); | 
 | 3193 | 			goto failure; | 
 | 3194 | 		} | 
 | 3195 |  | 
 | 3196 | 		if ((virt_to_phys((void *)page_list[i]) & | 
 | 3197 | 		     (LCLA_ALIGNMENT - 1)) == 0) | 
 | 3198 | 			break; | 
 | 3199 | 	} | 
 | 3200 |  | 
 | 3201 | 	for (j = 0; j < i; j++) | 
 | 3202 | 		free_pages(page_list[j], base->lcla_pool.pages); | 
 | 3203 |  | 
 | 3204 | 	if (i < MAX_LCLA_ALLOC_ATTEMPTS) { | 
 | 3205 | 		base->lcla_pool.base = (void *)page_list[i]; | 
 | 3206 | 	} else { | 
| Jonas Aaberg | 767a967 | 2010-08-09 12:08:34 +0000 | [diff] [blame] | 3207 | 		/* | 
 | 3208 | 		 * After many attempts without success at finding the correct | 
 | 3209 | 		 * alignment, fall back to allocating a big buffer. | 
 | 3210 | 		 */ | 
| Linus Walleij | 508849a | 2010-06-20 21:26:07 +0000 | [diff] [blame] | 3211 | 		dev_warn(base->dev, | 
 | 3212 | 			 "[%s] Failed to get %d pages @ 18 bit align.\n", | 
 | 3213 | 			 __func__, base->lcla_pool.pages); | 
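		/*
		 * Over-allocating by LCLA_ALIGNMENT bytes guarantees that
		 * PTR_ALIGN() below can pick a suitably aligned start address
		 * inside the buffer; the original pointer is kept in
		 * base_unaligned so it can still be passed to kfree() later.
		 */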
 | 3214 | 		base->lcla_pool.base_unaligned = kmalloc(SZ_1K * | 
 | 3215 | 							 base->num_phy_chans + | 
 | 3216 | 							 LCLA_ALIGNMENT, | 
 | 3217 | 							 GFP_KERNEL); | 
 | 3218 | 		if (!base->lcla_pool.base_unaligned) { | 
 | 3219 | 			ret = -ENOMEM; | 
 | 3220 | 			goto failure; | 
 | 3221 | 		} | 
 | 3222 |  | 
 | 3223 | 		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, | 
 | 3224 | 						 LCLA_ALIGNMENT); | 
 | 3225 | 	} | 
 | 3226 |  | 
| Rabin Vincent | 026cbc4 | 2011-01-25 11:18:14 +0100 | [diff] [blame] | 3227 | 	pool->dma_addr = dma_map_single(base->dev, pool->base, | 
 | 3228 | 					SZ_1K * base->num_phy_chans, | 
 | 3229 | 					DMA_TO_DEVICE); | 
 | 3230 | 	if (dma_mapping_error(base->dev, pool->dma_addr)) { | 
 | 3231 | 		pool->dma_addr = 0; | 
 | 3232 | 		ret = -ENOMEM; | 
 | 3233 | 		goto failure; | 
 | 3234 | 	} | 
 | 3235 |  | 
| Linus Walleij | 508849a | 2010-06-20 21:26:07 +0000 | [diff] [blame] | 3236 | 	writel(virt_to_phys(base->lcla_pool.base), | 
 | 3237 | 	       base->virtbase + D40_DREG_LCLA); | 
 | 3238 | failure: | 
 | 3239 | 	kfree(page_list); | 
 | 3240 | 	return ret; | 
 | 3241 | } | 
 | 3242 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3243 | static int __init d40_probe(struct platform_device *pdev) | 
 | 3244 | { | 
 | 3245 | 	int err; | 
 | 3246 | 	int ret = -ENOENT; | 
 | 3247 | 	struct d40_base *base; | 
 | 3248 | 	struct resource *res = NULL; | 
 | 3249 | 	int num_reserved_chans; | 
 | 3250 | 	u32 val; | 
 | 3251 |  | 
 | 3252 | 	base = d40_hw_detect_init(pdev); | 
 | 3253 |  | 
 | 3254 | 	if (!base) | 
 | 3255 | 		goto failure; | 
 | 3256 |  | 
 | 3257 | 	num_reserved_chans = d40_phy_res_init(base); | 
 | 3258 |  | 
 | 3259 | 	platform_set_drvdata(pdev, base); | 
 | 3260 |  | 
 | 3261 | 	spin_lock_init(&base->interrupt_lock); | 
 | 3262 | 	spin_lock_init(&base->execmd_lock); | 
 | 3263 |  | 
 | 3264 | 	/* Get IO for logical channel parameter address */ | 
 | 3265 | 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); | 
 | 3266 | 	if (!res) { | 
 | 3267 | 		ret = -ENOENT; | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 3268 | 		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3269 | 		goto failure; | 
 | 3270 | 	} | 
 | 3271 | 	base->lcpa_size = resource_size(res); | 
 | 3272 | 	base->phy_lcpa = res->start; | 
 | 3273 |  | 
 | 3274 | 	if (request_mem_region(res->start, resource_size(res), | 
 | 3275 | 			       D40_NAME " I/O lcpa") == NULL) { | 
 | 3276 | 		ret = -EBUSY; | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 3277 | 		d40_err(&pdev->dev, | 
 | 3278 | 			"Failed to request LCPA region 0x%x-0x%x\n", | 
 | 3279 | 			res->start, res->end); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3280 | 		goto failure; | 
 | 3281 | 	} | 
 | 3282 |  | 
 | 3283 | 	/* We make use of ESRAM memory for this. */ | 
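	/*
	 * If an earlier boot stage already programmed a different, non-zero
	 * LCPA base, keep the hardware value and only warn about the
	 * mismatch; otherwise program the address of our "lcpa" resource.
	 */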
 | 3284 | 	val = readl(base->virtbase + D40_DREG_LCPA); | 
 | 3285 | 	if (res->start != val && val != 0) { | 
 | 3286 | 		dev_warn(&pdev->dev, | 
 | 3287 | 			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n", | 
 | 3288 | 			 __func__, val, res->start); | 
 | 3289 | 	} else | 
 | 3290 | 		writel(res->start, base->virtbase + D40_DREG_LCPA); | 
 | 3291 |  | 
 | 3292 | 	base->lcpa_base = ioremap(res->start, resource_size(res)); | 
 | 3293 | 	if (!base->lcpa_base) { | 
 | 3294 | 		ret = -ENOMEM; | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 3295 | 		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3296 | 		goto failure; | 
 | 3297 | 	} | 
| Narayanan G | 28c7a19 | 2011-11-22 13:56:55 +0530 | [diff] [blame] | 3298 | 	/* If the LCLA has to be located in ESRAM we don't need to allocate it */ | 
 | 3299 | 	if (base->plat_data->use_esram_lcla) { | 
 | 3300 | 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | 
 | 3301 | 							"lcla_esram"); | 
 | 3302 | 		if (!res) { | 
 | 3303 | 			ret = -ENOENT; | 
 | 3304 | 			d40_err(&pdev->dev, | 
 | 3305 | 				"No \"lcla_esram\" memory resource\n"); | 
 | 3306 | 			goto failure; | 
 | 3307 | 		} | 
 | 3308 | 		base->lcla_pool.base = ioremap(res->start, | 
 | 3309 | 						resource_size(res)); | 
 | 3310 | 		if (!base->lcla_pool.base) { | 
 | 3311 | 			ret = -ENOMEM; | 
 | 3312 | 			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n"); | 
 | 3313 | 			goto failure; | 
 | 3314 | 		} | 
 | 3315 | 		writel(res->start, base->virtbase + D40_DREG_LCLA); | 
| Linus Walleij | 508849a | 2010-06-20 21:26:07 +0000 | [diff] [blame] | 3316 |  | 
| Narayanan G | 28c7a19 | 2011-11-22 13:56:55 +0530 | [diff] [blame] | 3317 | 	} else { | 
 | 3318 | 		ret = d40_lcla_allocate(base); | 
 | 3319 | 		if (ret) { | 
 | 3320 | 			d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); | 
 | 3321 | 			goto failure; | 
 | 3322 | 		} | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3323 | 	} | 
 | 3324 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3325 | 	spin_lock_init(&base->lcla_pool.lock); | 
 | 3326 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3327 | 	base->irq = platform_get_irq(pdev, 0); | 
 | 3328 |  | 
 | 3329 | 	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3330 | 	if (ret) { | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 3331 | 		d40_err(&pdev->dev, "No IRQ defined\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3332 | 		goto failure; | 
 | 3333 | 	} | 
 | 3334 |  | 
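	/*
	 * pm_runtime_irq_safe() marks the runtime PM callbacks as callable
	 * from atomic context, presumably so that runtime PM references can
	 * be taken and dropped from the driver's interrupt paths; the
	 * autosuspend delay keeps the controller powered for a short while
	 * between transfers instead of toggling it for every descriptor.
	 */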
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 3335 | 	pm_runtime_irq_safe(base->dev); | 
 | 3336 | 	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); | 
 | 3337 | 	pm_runtime_use_autosuspend(base->dev); | 
 | 3338 | 	pm_runtime_enable(base->dev); | 
 | 3339 | 	pm_runtime_resume(base->dev); | 
| Narayanan G | 28c7a19 | 2011-11-22 13:56:55 +0530 | [diff] [blame] | 3340 |  | 
 | 3341 | 	if (base->plat_data->use_esram_lcla) { | 
 | 3342 |  | 
 | 3343 | 		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); | 
 | 3344 | 		if (IS_ERR(base->lcpa_regulator)) { | 
 | 3345 | 			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); | 
 | 3346 | 			base->lcpa_regulator = NULL; | 
 | 3347 | 			goto failure; | 
 | 3348 | 		} | 
 | 3349 |  | 
 | 3350 | 		ret = regulator_enable(base->lcpa_regulator); | 
 | 3351 | 		if (ret) { | 
 | 3352 | 			d40_err(&pdev->dev, | 
 | 3353 | 				"Failed to enable lcpa_regulator\n"); | 
 | 3354 | 			regulator_put(base->lcpa_regulator); | 
 | 3355 | 			base->lcpa_regulator = NULL; | 
 | 3356 | 			goto failure; | 
 | 3357 | 		} | 
 | 3358 | 	} | 
 | 3359 |  | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 3360 | 	base->initialized = true; | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3361 | 	err = d40_dmaengine_init(base, num_reserved_chans); | 
 | 3362 | 	if (err) | 
 | 3363 | 		goto failure; | 
 | 3364 |  | 
 | 3365 | 	d40_hw_init(base); | 
 | 3366 |  | 
 | 3367 | 	dev_info(base->dev, "initialized\n"); | 
 | 3368 | 	return 0; | 
 | 3369 |  | 
 | 3370 | failure: | 
 | 3371 | 	if (base) { | 
| Jonas Aaberg | c675b1b | 2010-06-20 21:25:08 +0000 | [diff] [blame] | 3372 | 		if (base->desc_slab) | 
 | 3373 | 			kmem_cache_destroy(base->desc_slab); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3374 | 		if (base->virtbase) | 
 | 3375 | 			iounmap(base->virtbase); | 
| Rabin Vincent | 026cbc4 | 2011-01-25 11:18:14 +0100 | [diff] [blame] | 3376 |  | 
| Narayanan G | 28c7a19 | 2011-11-22 13:56:55 +0530 | [diff] [blame] | 3377 | 		if (base->lcla_pool.base && base->plat_data->use_esram_lcla) { | 
 | 3378 | 			iounmap(base->lcla_pool.base); | 
 | 3379 | 			base->lcla_pool.base = NULL; | 
 | 3380 | 		} | 
 | 3381 |  | 
| Rabin Vincent | 026cbc4 | 2011-01-25 11:18:14 +0100 | [diff] [blame] | 3382 | 		if (base->lcla_pool.dma_addr) | 
 | 3383 | 			dma_unmap_single(base->dev, base->lcla_pool.dma_addr, | 
 | 3384 | 					 SZ_1K * base->num_phy_chans, | 
 | 3385 | 					 DMA_TO_DEVICE); | 
 | 3386 |  | 
| Linus Walleij | 508849a | 2010-06-20 21:26:07 +0000 | [diff] [blame] | 3387 | 		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) | 
 | 3388 | 			free_pages((unsigned long)base->lcla_pool.base, | 
 | 3389 | 				   base->lcla_pool.pages); | 
| Jonas Aaberg | 767a967 | 2010-08-09 12:08:34 +0000 | [diff] [blame] | 3390 |  | 
 | 3391 | 		kfree(base->lcla_pool.base_unaligned); | 
 | 3392 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3393 | 		if (base->phy_lcpa) | 
 | 3394 | 			release_mem_region(base->phy_lcpa, | 
 | 3395 | 					   base->lcpa_size); | 
 | 3396 | 		if (base->phy_start) | 
 | 3397 | 			release_mem_region(base->phy_start, | 
 | 3398 | 					   base->phy_size); | 
 | 3399 | 		if (base->clk) { | 
 | 3400 | 			clk_disable(base->clk); | 
 | 3401 | 			clk_put(base->clk); | 
 | 3402 | 		} | 
 | 3403 |  | 
| Narayanan G | 28c7a19 | 2011-11-22 13:56:55 +0530 | [diff] [blame] | 3404 | 		if (base->lcpa_regulator) { | 
 | 3405 | 			regulator_disable(base->lcpa_regulator); | 
 | 3406 | 			regulator_put(base->lcpa_regulator); | 
 | 3407 | 		} | 
 | 3408 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3409 | 		kfree(base->lcla_pool.alloc_map); | 
 | 3410 | 		kfree(base->lookup_log_chans); | 
 | 3411 | 		kfree(base->lookup_phy_chans); | 
 | 3412 | 		kfree(base->phy_res); | 
 | 3413 | 		kfree(base); | 
 | 3414 | 	} | 
 | 3415 |  | 
| Rabin Vincent | 6db5a8b | 2011-01-25 11:18:09 +0100 | [diff] [blame] | 3416 | 	d40_err(&pdev->dev, "probe failed\n"); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3417 | 	return ret; | 
 | 3418 | } | 
 | 3419 |  | 
 | 3420 | static struct platform_driver d40_driver = { | 
 | 3421 | 	.driver = { | 
 | 3422 | 		.owner = THIS_MODULE, | 
 | 3423 | 		.name  = D40_NAME, | 
| Narayanan G | 7fb3e75 | 2011-11-17 17:26:41 +0530 | [diff] [blame] | 3424 | 		.pm = DMA40_PM_OPS, | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3425 | 	}, | 
 | 3426 | }; | 
 | 3427 |  | 
| Rabin Vincent | cb9ab2d | 2011-01-25 11:18:04 +0100 | [diff] [blame] | 3428 | static int __init stedma40_init(void) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 3429 | { | 
 | 3430 | 	return platform_driver_probe(&d40_driver, d40_probe); | 
 | 3431 | } | 
| Linus Walleij | a0eb221 | 2011-05-18 14:18:57 +0200 | [diff] [blame] | 3432 | subsys_initcall(stedma40_init); |