/*
 * drivers/dma/ste_dma40.c
 *
 * Copyright (C) ST-Ericsson 2007-2010
 * License terms: GNU General Public License (GPL) version 2
 * Author: Per Friden <per.friden@stericsson.com>
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
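
/*
 * Worked example (illustrative): physical channel 5 gives
 * D40_CHAN_POS(5) = 2 * (5 / 2) = 4 and D40_CHAN_POS_MASK(5) = 0x3 << 4
 * = 0x30. Channels 4 and 5 thus both map to bit pair [5:4], in the
 * ACTIVE and ACTIVO registers respectively (see
 * d40_channel_execute_command()).
 */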

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

#define D40_ALLOC_FREE		(1U << 31)
#define D40_ALLOC_PHY		(1U << 30)
#define D40_ALLOC_LOG_FREE	0

/*
 * The number of free d40_desc to keep in memory before starting
 * to kfree() them.
 */
#define D40_DESC_CACHE_SIZE 50

/* Hardware designer of the block */
#define D40_PERIPHID2_DESIGNER 0x8

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area used when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	size;
	/* Space for dst and src, plus an extra for padding */
	u8	pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
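
/*
 * Sizing rationale (inferred from d40_pool_lli_alloc() below): pre_alloc_lli
 * holds room for one src LLI, one dst LLI and one extra LLI's worth of
 * padding, so that both the src and dst pointers can be rounded up with
 * PTR_ALIGN() without overrunning the buffer.
 */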

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of LLIs in lli_pool.
 * @lli_tcount: Number of LLIs processed in the transfer. When it equals
 * lli_len, this transfer job is done.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @dir: The transfer direction of this job.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	u32				 lli_len;
	u32				 lli_tcount;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	enum dma_data_direction		 dir;
	bool				 is_in_client_list;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA.
 * @phy: Physical base address of LCLA.
 * @base_size: The size of the LCLA area.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries in alloc_map. Equals the
 * number of physical channels.
 */
struct d40_lcla_pool {
	void		*base;
	dma_addr_t	 phy;
	resource_size_t  base_size;
	spinlock_t	 lock;
	u32		*alloc_map;
	int		 num_blocks;
};

/**
 * struct d40_phy_res - struct for handling event lines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mask showing which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but for dst.
 * allocated_src and allocated_dst use the D40_ALLOC* defines as well as
 * the event line number. allocated_src and allocated_dst cannot both be
 * allocated to the same physical channel, since the interrupt handler would
 * then have no way of figuring out which one the interrupt belongs to.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical channel number, if any, of this channel.
 * @completed: Starts at 1; after the first interrupt it is set to the dma
 * engine's current cookie.
 * @pending_tx: The number of pending transfers. Used between the interrupt
 * handler and the tasklet.
 * @busy: Set to true when a transfer is ongoing on this channel.
 * @phy_chan: Pointer to the physical channel which this instance runs on.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call the client callback.
 * @client: Client-owned descriptor list.
 * @active: Active descriptors.
 * @queue: Queued jobs.
 * @free: List of free descriptors, ready to be reused.
 * @free_len: Number of descriptors in the free list.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst/src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 active;
	struct list_head		 queue;
	struct list_head		 free;
	int				 free_len;
	struct stedma40_chan_cfg	 dma_cfg;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_lcla_elem		 lcla;
	struct d40_log_lli_full		*lcpa;
};

/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in the system.
 * @log_chans: Room for all possible logical channels in the system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtually mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 */
struct d40_base {
	spinlock_t			 interrupt_lock;
	spinlock_t			 execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
};

/**
 * struct d40_interrupt_lookup - lookup table for the interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *)base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);

		d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
		d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
	}

	return 0;
}
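
/*
 * Illustrative layout, assuming sizeof(struct d40_phy_lli) == 16 (the real
 * size is defined in ste_dma40_ll.h): for lli_len == 4 the pool allocates
 * ALIGN(4 * 2 * 16, 16) + 16 = 144 bytes, placing the 4 src LLIs first and
 * the 4 dst LLIs directly after them, both rounded up via PTR_ALIGN().
 */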

static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
	d40d->lli_phy.src_addr = 0;
	d40d->lli_phy.dst_addr = 0;
}

static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
				      struct d40_desc *desc)
{
	dma_cookie_t cookie = d40c->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	d40c->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}
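
/*
 * Cookie example: cookies increase monotonically per channel; when the
 * increment would make the signed counter go negative (wrap past INT_MAX),
 * it restarts at 1, since negative cookie values are reserved for error
 * status in the dmaengine framework.
 */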

static void d40_desc_reset(struct d40_desc *d40d)
{
	d40d->lli_tcount = 0;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc;
	struct d40_desc *d;
	struct d40_desc *_d;

	if (!list_empty(&d40c->client)) {
		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				desc = d;
				goto out;
			}
	}

	if (list_empty(&d40c->free)) {
		/* Alloc new desc because we're out of used ones */
		desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
		if (desc == NULL)
			goto out;
		INIT_LIST_HEAD(&desc->node);
	} else {
		/* Reuse an old desc. */
		desc = list_first_entry(&d40c->free,
					struct d40_desc,
					node);
		list_del(&desc->node);
		d40c->free_len--;
	}
out:
	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40c->free_len < D40_DESC_CACHE_SIZE) {
		list_add_tail(&d40d->node, &d40c->free);
		d40c->free_len++;
	} else
		kfree(d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}
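
/*
 * Descriptor lifecycle overview (derived from the helpers above):
 *
 *   d40_desc_get()  ->  d40_desc_queue()  ->  d40_desc_submit()
 *   free/client list     d40c->queue           d40c->active
 *
 * On completion the tasklet either recycles the descriptor with
 * d40_desc_free() or parks it on d40c->client until the client acks it.
 */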

/* Support functions for logical channels */

static int d40_lcla_id_get(struct d40_chan *d40c,
			   struct d40_lcla_pool *pool)
{
	int src_id = 0;
	int dst_id = 0;
	struct d40_log_lli *lcla_lidx_base =
		pool->base + d40c->phy_chan->num * 1024;
	int i;
	int lli_per_log = d40c->base->plat_data->llis_per_log;

	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
		return 0;

	if (pool->num_blocks > 32)
		return -EINVAL;

	spin_lock(&pool->lock);

	for (i = 0; i < pool->num_blocks; i++) {
		if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
			pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
			break;
		}
	}
	src_id = i;
	if (src_id >= pool->num_blocks)
		goto err;

	for (; i < pool->num_blocks; i++) {
		if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
			pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
			break;
		}
	}

	dst_id = i;
	if (dst_id >= pool->num_blocks || dst_id == src_id)
		goto err;

	d40c->lcla.src_id = src_id;
	d40c->lcla.dst_id = dst_id;
	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

	spin_unlock(&pool->lock);
	return 0;
err:
	spin_unlock(&pool->lock);
	return -EINVAL;
}
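
/*
 * Allocation example (illustrative): with alloc_map[phy] == 0x0b (bits 0,
 * 1 and 3 taken), d40_lcla_id_get() claims bit 2 for src_id and bit 4 for
 * dst_id, leaving alloc_map[phy] == 0x1f. Each id selects an llis_per_log
 * sized slot inside that physical channel's 1024-byte LCLA window.
 */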

static void d40_lcla_id_put(struct d40_chan *d40c,
			    struct d40_lcla_pool *pool,
			    int id)
{
	if (id < 0)
		return;

	d40c->lcla.src_id = -1;
	d40c->lcla.dst_id = -1;

	spin_lock(&pool->lock);
	pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
	spin_unlock(&pool->lock);
}

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	int status, i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
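
/*
 * Worst-case suspend latency, from the constants above: D40_SUSPEND_MAX_IT
 * polls with udelay(3) between reads give roughly 500 * 3 us = 1.5 ms of
 * busy-waiting (with execmd_lock held) before -EBUSY is returned.
 */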

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *d;
	struct d40_desc *_d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			/* Return desc to free-list */
			d40_desc_free(d40c, d);
		}

	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
			d40c->lcla.src_id);
	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
			d40c->lcla.dst_id);

	d40c->pending_tx = 0;
	d40c->busy = false;
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val = 0;

	/* If SSLNK or SDLNK is zero all events are disabled */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SDLNK);
	return val;
}

static void d40_config_enable_lidx(struct d40_chan *d40c)
{
	/* Set LIDX for lcla */
	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);

	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
}

static int d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;
	int res;

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res)
		return res;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		d40_config_enable_lidx(d40c);
	}
	return res;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_phy.dst && d40d->lli_phy.src) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
		d40d->lli_tcount = d40d->lli_len;
	} else if (d40d->lli_log.dst && d40d->lli_log.src) {
		u32 lli_len;
		struct d40_log_lli *src = d40d->lli_log.src;
		struct d40_log_lli *dst = d40d->lli_log.dst;

		src += d40d->lli_tcount;
		dst += d40d->lli_tcount;

		/* Load at most llis_per_log of the remaining LLIs */
		if (d40d->lli_len - d40d->lli_tcount <=
		    d40c->base->plat_data->llis_per_log)
			lli_len = d40d->lli_len - d40d->lli_tcount;
		else
			lli_len = d40c->base->plat_data->llis_per_log;
		d40d->lli_tcount += lli_len;
		d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
				  d40c->lcla.dst,
				  dst, src,
				  d40c->base->plat_data->llis_per_log);
	}
}
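
/*
 * Illustrative chunking, assuming llis_per_log == 8 in the platform data:
 * a logical job with lli_len == 20 is fed to the hardware in three
 * d40_desc_load() calls (8 + 8 + 4 LLIs), re-triggered by dma_tc_handle()
 * on each terminal-count interrupt; physical jobs are written in one go.
 */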

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	tx->cookie = d40_assign_cookie(d40c, d40d);

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	int err;

	if (d40c->log_num != D40_PHY_CHAN) {
		err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
		if (err)
			return err;
		d40_config_set_event(d40c, true);
	}

	err = d40_channel_execute_command(d40c, D40_DMA_RUN);

	return err;
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/* Called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	if (!d40c->phy_chan)
		return;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->lli_tcount < d40d->lli_len) {
		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d_fin;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d_fin = d40_first_active_get(d40c);

	if (d40d_fin == NULL)
		goto err;

	d40c->completed = d40d_fin->txd.cookie;

	/*
	 * If terminating a channel, pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the
	 * client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d_fin->txd.callback;
	callback_param = d40d_fin->txd.callback_param;

	if (async_tx_test_ack(&d40d_fin->txd)) {
		d40_pool_lli_free(d40d_fin);
		d40_desc_remove(d40d_fin);
		/* Return desc to free-list */
		d40_desc_free(d40c, d40d_fin);
	} else {
		d40_desc_reset(d40d_fin);
		if (!d40d_fin->is_in_client_list) {
			d40_desc_remove(d40d_fin);
			list_add_tail(&d40d_fin->node, &d40c->client);
			d40d_fin->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback)
		callback(callback_param);

	return;

err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 tmp;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		tmp = readl(base->virtbase + il[row].clr);
		tmp |= 1 << idx;
		writel(tmp, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
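
/*
 * Bit-to-channel example, assuming a 32-bit build (BITS_PER_LONG == 32):
 * a set bit 40 in regs[] gives row = 40 / 32 = 1 and idx = 40 % 32 = 8,
 * i.e. the LCTIS1 register, which maps to logical channel
 * il[1].offset + idx = 32 + 8 = 40 in lookup_log_chans[].
 */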

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}

static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}
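
/*
 * allocated_src/allocated_dst state example: D40_ALLOC_FREE (bit 31) marks
 * a fully free half channel and D40_ALLOC_PHY (bit 30) a physically owned
 * one; claiming event lines 2 and 5 for logical use yields the value
 * (1 << 2) | (1 << 5) == 0x24, which d40_alloc_mask_free() below clears
 * bit by bit back to D40_ALLOC_LOG_FREE and finally D40_ALLOC_FREE.
 */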

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		/* Physical interrupts are masked per physical full channel */
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0, is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}
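
/*
 * Spreading example (illustrative, num_phy_chans == 32): a device in event
 * group 2 is offered physical channels 4, 5, 12, 13, 20, 21, 28, 29
 * (phy_num = j + event_group * 2 with j stepping by 8); src allocations
 * scan each pair upwards and dst allocations downwards, so src and dst
 * tend to land on different physical channels.
 */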

static int d40_config_chan(struct d40_chan *d40c,
			   struct stedma40_chan_cfg *info)
{
	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * 32;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type * 32 + 16;
	}

	/* Write channel configuration to the DMA */
	return d40_config_write(d40c);
}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}

static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event, dir;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
			__func__);
		return res;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		dir = D40_CHAN_REG_SDLNK;
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		dir = D40_CHAN_REG_SSLNK;
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/*
		 * Release the logical channel; deactivate the event line
		 * while the physical resource is suspended.
		 */
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
		       D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       phy->num * D40_DREG_PCDELTA + dir);

		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			return 0;
		}
	} else
		d40_alloc_mask_free(phy, is_src, 0);

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	/* Invalidate channel type */
	d40c->dma_cfg.channel_type = 0;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}

static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (d40c->log_num != D40_PHY_CHAN) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
|  | 1392 |  | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 1393 | static bool d40_is_paused(struct d40_chan *d40c) | 
|  | 1394 | { | 
|  | 1395 | bool is_paused = false; | 
|  | 1396 | unsigned long flags; | 
|  | 1397 | void __iomem *active_reg; | 
|  | 1398 | u32 status; | 
|  | 1399 | u32 event; | 
|  | 1400 | int res; | 
|  | 1401 |  | 
|  | 1402 | spin_lock_irqsave(&d40c->lock, flags); | 
|  | 1403 |  | 
|  | 1404 | if (d40c->log_num == D40_PHY_CHAN) { | 
|  | 1405 | if (d40c->phy_chan->num % 2 == 0) | 
|  | 1406 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | 
|  | 1407 | else | 
|  | 1408 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; | 
|  | 1409 |  | 
|  | 1410 | status = (readl(active_reg) & | 
|  | 1411 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | 
|  | 1412 | D40_CHAN_POS(d40c->phy_chan->num); | 
|  | 1413 | if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) | 
|  | 1414 | is_paused = true; | 
|  | 1415 |  | 
|  | 1416 | goto _exit; | 
|  | 1417 | } | 
|  | 1418 |  | 
|  | 1419 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 
|  | 1420 | if (res != 0) | 
|  | 1421 | goto _exit; | 
|  | 1422 |  | 
|  | 1423 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 
|  | 1424 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) | 
|  | 1425 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 
|  | 1426 | else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) | 
|  | 1427 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 
|  | 1428 | else { | 
|  | 1429 | dev_err(&d40c->chan.dev->device, | 
|  | 1430 | "[%s] Unknown direction\n", __func__); | 
|  | 1431 | goto _exit; | 
|  | 1432 | } | 
|  | 1433 | status = d40_chan_has_events(d40c); | 
|  | 1434 | status = (status & D40_EVENTLINE_MASK(event)) >> | 
|  | 1435 | D40_EVENTLINE_POS(event); | 
|  | 1436 |  | 
|  | 1437 | if (status != D40_DMA_RUN) | 
|  | 1438 | is_paused = true; | 
|  | 1439 |  | 
|  | 1440 | /* Resume the other logical channels if any */ | 
|  | 1441 | if (d40_chan_has_events(d40c)) | 
|  | 1442 | res = d40_channel_execute_command(d40c, | 
|  | 1443 | D40_DMA_RUN); | 
|  | 1444 |  | 
|  | 1445 | _exit: | 
|  | 1446 | spin_unlock_irqrestore(&d40c->lock, flags); | 
|  | 1447 | return is_paused; | 
|  | 1449 | } | 
|  | 1450 |  | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 1452 | static bool d40_tx_is_linked(struct d40_chan *d40c) | 
|  | 1453 | { | 
|  | 1454 | bool is_link; | 
|  | 1455 |  | 
|  | 1456 | if (d40c->log_num != D40_PHY_CHAN) | 
|  | 1457 | is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; | 
|  | 1458 | else | 
|  | 1459 | is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + | 
|  | 1460 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 
|  | 1461 | D40_CHAN_REG_SDLNK) & | 
|  | 1462 | D40_SREG_LNK_PHYS_LNK_MASK; | 
|  | 1463 | return is_link; | 
|  | 1464 | } | 
|  | 1465 |  | 
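|  |  | /* | 
|  |  |  * Bytes left on the channel: the element counter (ECNT) holds the | 
|  |  |  * number of elements still to be transferred, and each element is | 
|  |  |  * 2^data_width bytes, so the residue is ECNT << data_width. | 
|  |  |  */ | 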
|  | 1466 | static u32 d40_residue(struct d40_chan *d40c) | 
|  | 1467 | { | 
|  | 1468 | u32 num_elt; | 
|  | 1469 |  | 
|  | 1470 | if (d40c->log_num != D40_PHY_CHAN) | 
|  | 1471 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) | 
|  | 1472 | >> D40_MEM_LCSP2_ECNT_POS; | 
|  | 1473 | else | 
|  | 1474 | num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + | 
|  | 1475 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 
|  | 1476 | D40_CHAN_REG_SDELT) & | 
|  | 1477 | D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS; | 
|  | 1478 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); | 
|  | 1479 | } | 
|  | 1480 |  | 
|  | 1481 | static int d40_resume(struct dma_chan *chan) | 
|  | 1482 | { | 
|  | 1483 | struct d40_chan *d40c = | 
|  | 1484 | container_of(chan, struct d40_chan, chan); | 
|  | 1485 | int res = 0; | 
|  | 1486 | unsigned long flags; | 
|  | 1487 |  | 
|  | 1488 | spin_lock_irqsave(&d40c->lock, flags); | 
|  | 1489 |  | 
|  | 1490 | if (d40c->log_num != D40_PHY_CHAN) { | 
|  | 1491 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 
|  | 1492 | if (res) | 
|  | 1493 | goto out; | 
|  | 1494 |  | 
|  | 1495 | /* If there are bytes left to transfer or a linked tx, resume the job */ | 
|  | 1496 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { | 
|  | 1497 | d40_config_set_event(d40c, true); | 
|  | 1498 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); | 
|  | 1499 | } | 
|  | 1500 | } else if (d40_residue(d40c) || d40_tx_is_linked(d40c)) | 
|  | 1501 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); | 
|  | 1502 |  | 
|  | 1503 | out: | 
|  | 1504 | spin_unlock_irqrestore(&d40c->lock, flags); | 
|  | 1505 | return res; | 
|  | 1506 | } | 
|  | 1507 |  | 
|  | 1508 | static u32 stedma40_residue(struct dma_chan *chan) | 
|  | 1509 | { | 
|  | 1510 | struct d40_chan *d40c = | 
|  | 1511 | container_of(chan, struct d40_chan, chan); | 
|  | 1512 | u32 bytes_left; | 
|  | 1513 | unsigned long flags; | 
|  | 1514 |  | 
|  | 1515 | spin_lock_irqsave(&d40c->lock, flags); | 
|  | 1516 | bytes_left = d40_residue(d40c); | 
|  | 1517 | spin_unlock_irqrestore(&d40c->lock, flags); | 
|  | 1518 |  | 
|  | 1519 | return bytes_left; | 
|  | 1520 | } | 
|  | 1521 |  | 
|  | 1522 | /* Public DMA functions in addition to the DMA engine framework */ | 
|  | 1523 |  | 
|  | 1524 | int stedma40_set_psize(struct dma_chan *chan, | 
|  | 1525 | int src_psize, | 
|  | 1526 | int dst_psize) | 
|  | 1527 | { | 
|  | 1528 | struct d40_chan *d40c = | 
|  | 1529 | container_of(chan, struct d40_chan, chan); | 
|  | 1530 | unsigned long flags; | 
|  | 1531 |  | 
|  | 1532 | spin_lock_irqsave(&d40c->lock, flags); | 
|  | 1533 |  | 
|  | 1534 | if (d40c->log_num != D40_PHY_CHAN) { | 
|  | 1535 | d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; | 
|  | 1536 | d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; | 
|  | 1537 | d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; | 
|  | 1538 | d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; | 
|  | 1539 | goto out; | 
|  | 1540 | } | 
|  | 1541 |  | 
|  | 1542 | if (src_psize == STEDMA40_PSIZE_PHY_1) | 
|  | 1543 | d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); | 
|  | 1544 | else { | 
|  | 1545 | d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; | 
|  | 1546 | d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << | 
|  | 1547 | D40_SREG_CFG_PSIZE_POS); | 
|  | 1548 | d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS; | 
|  | 1549 | } | 
|  | 1550 |  | 
|  | 1551 | if (dst_psize == STEDMA40_PSIZE_PHY_1) | 
|  | 1552 | d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); | 
|  | 1553 | else { | 
|  | 1554 | d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; | 
|  | 1555 | d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << | 
|  | 1556 | D40_SREG_CFG_PSIZE_POS); | 
|  | 1557 | d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS; | 
|  | 1558 | } | 
|  | 1559 | out: | 
|  | 1560 | spin_unlock_irqrestore(&d40c->lock, flags); | 
|  | 1561 | return 0; | 
|  | 1562 | } | 
|  | 1563 | EXPORT_SYMBOL(stedma40_set_psize); | 
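|  |  |  | 
|  |  | /* | 
|  |  |  * Usage sketch for stedma40_set_psize(). Illustrative only, not part of | 
|  |  |  * this driver; it assumes a physical channel already obtained through | 
|  |  |  * dma_request_channel(): | 
|  |  |  * | 
|  |  |  *	struct dma_chan *chan; | 
|  |  |  * | 
|  |  |  *	16-element bursts on the source side, single accesses on the | 
|  |  |  *	destination side: | 
|  |  |  * | 
|  |  |  *	stedma40_set_psize(chan, STEDMA40_PSIZE_PHY_16, | 
|  |  |  *			   STEDMA40_PSIZE_PHY_1); | 
|  |  |  */ | 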
|  | 1564 |  | 
|  | 1565 | struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | 
|  | 1566 | struct scatterlist *sgl_dst, | 
|  | 1567 | struct scatterlist *sgl_src, | 
|  | 1568 | unsigned int sgl_len, | 
|  | 1569 | unsigned long flags) | 
|  | 1570 | { | 
|  | 1571 | int res; | 
|  | 1572 | struct d40_desc *d40d; | 
|  | 1573 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 
|  | 1574 | chan); | 
|  | 1575 | unsigned long flg; | 
|  | 1576 | int lli_max = d40c->base->plat_data->llis_per_log; | 
|  | 1577 |  | 
|  | 1579 | spin_lock_irqsave(&d40c->lock, flg); | 
|  | 1580 | d40d = d40_desc_get(d40c); | 
|  | 1581 |  | 
|  | 1582 | if (d40d == NULL) | 
|  | 1583 | goto err; | 
|  | 1584 |  | 
|  | 1585 | memset(d40d, 0, sizeof(struct d40_desc)); | 
|  | 1586 | d40d->lli_len = sgl_len; | 
|  | 1587 |  | 
|  | 1588 | d40d->txd.flags = flags; | 
|  | 1589 |  | 
|  | 1590 | if (d40c->log_num != D40_PHY_CHAN) { | 
|  | 1591 | /* | 
|  | 1592 |  * Check if there is space available in lcla. If not, | 
|  | 1593 |  * split the list into 1-length chunks and run only in | 
|  | 1594 |  * lcpa space. | 
|  | 1595 |  */ | 
|  | 1596 | if (sgl_len > 1 && | 
|  | 1597 |     d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0) | 
|  | 1598 | lli_max = 1; | 
|  | 1600 |  | 
|  | 1601 | if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { | 
|  | 1602 | dev_err(&d40c->chan.dev->device, | 
|  | 1603 | "[%s] Out of memory\n", __func__); | 
|  | 1604 | goto err; | 
|  | 1605 | } | 
|  | 1606 |  | 
|  | 1607 | (void) d40_log_sg_to_lli(d40c->lcla.src_id, | 
|  | 1608 | sgl_src, | 
|  | 1609 | sgl_len, | 
|  | 1610 | d40d->lli_log.src, | 
|  | 1611 | d40c->log_def.lcsp1, | 
|  | 1612 | d40c->dma_cfg.src_info.data_width, | 
|  | 1613 | flags & DMA_PREP_INTERRUPT, lli_max, | 
|  | 1614 | d40c->base->plat_data->llis_per_log); | 
|  | 1615 |  | 
|  | 1616 | (void) d40_log_sg_to_lli(d40c->lcla.dst_id, | 
|  | 1617 | sgl_dst, | 
|  | 1618 | sgl_len, | 
|  | 1619 | d40d->lli_log.dst, | 
|  | 1620 | d40c->log_def.lcsp3, | 
|  | 1621 | d40c->dma_cfg.dst_info.data_width, | 
|  | 1622 | flags & DMA_PREP_INTERRUPT, lli_max, | 
|  | 1623 | d40c->base->plat_data->llis_per_log); | 
|  | 1626 | } else { | 
|  | 1627 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { | 
|  | 1628 | dev_err(&d40c->chan.dev->device, | 
|  | 1629 | "[%s] Out of memory\n", __func__); | 
|  | 1630 | goto err; | 
|  | 1631 | } | 
|  | 1632 |  | 
|  | 1633 | res = d40_phy_sg_to_lli(sgl_src, | 
|  | 1634 | sgl_len, | 
|  | 1635 | 0, | 
|  | 1636 | d40d->lli_phy.src, | 
|  | 1637 | d40d->lli_phy.src_addr, | 
|  | 1638 | d40c->src_def_cfg, | 
|  | 1639 | d40c->dma_cfg.src_info.data_width, | 
|  | 1640 | d40c->dma_cfg.src_info.psize, | 
|  | 1641 | true); | 
|  | 1642 |  | 
|  | 1643 | if (res < 0) | 
|  | 1644 | goto err; | 
|  | 1645 |  | 
|  | 1646 | res = d40_phy_sg_to_lli(sgl_dst, | 
|  | 1647 | sgl_len, | 
|  | 1648 | 0, | 
|  | 1649 | d40d->lli_phy.dst, | 
|  | 1650 | d40d->lli_phy.dst_addr, | 
|  | 1651 | d40c->dst_def_cfg, | 
|  | 1652 | d40c->dma_cfg.dst_info.data_width, | 
|  | 1653 | d40c->dma_cfg.dst_info.psize, | 
|  | 1654 | true); | 
|  | 1655 |  | 
|  | 1656 | if (res < 0) | 
|  | 1657 | goto err; | 
|  | 1658 |  | 
|  | 1659 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | 
|  | 1660 | d40d->lli_pool.size, DMA_TO_DEVICE); | 
|  | 1661 | } | 
|  | 1662 |  | 
|  | 1663 | dma_async_tx_descriptor_init(&d40d->txd, chan); | 
|  | 1664 |  | 
|  | 1665 | d40d->txd.tx_submit = d40_tx_submit; | 
|  | 1666 |  | 
|  | 1667 | spin_unlock_irqrestore(&d40c->lock, flg); | 
|  | 1668 |  | 
|  | 1669 | return &d40d->txd; | 
|  | 1670 | err: | 
|  | 1671 | spin_unlock_irqrestore(&d40c->lock, flg); | 
|  | 1672 | return NULL; | 
|  | 1673 | } | 
|  | 1674 | EXPORT_SYMBOL(stedma40_memcpy_sg); | 
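|  |  |  | 
|  |  | /* | 
|  |  |  * Usage sketch for stedma40_memcpy_sg(). Illustrative only; the buffers | 
|  |  |  * and their DMA mapping are assumptions on the caller's side: | 
|  |  |  * | 
|  |  |  *	struct scatterlist src_sg, dst_sg; | 
|  |  |  *	struct dma_async_tx_descriptor *txd; | 
|  |  |  * | 
|  |  |  *	sg_init_one(&src_sg, src_buf, len); | 
|  |  |  *	sg_init_one(&dst_sg, dst_buf, len); | 
|  |  |  *	(both buffers DMA-mapped by the caller) | 
|  |  |  *	txd = stedma40_memcpy_sg(chan, &dst_sg, &src_sg, 1, | 
|  |  |  *				 DMA_PREP_INTERRUPT); | 
|  |  |  *	if (txd) | 
|  |  |  *		txd->tx_submit(txd); | 
|  |  |  */ | 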
|  | 1675 |  | 
|  | 1676 | bool stedma40_filter(struct dma_chan *chan, void *data) | 
|  | 1677 | { | 
|  | 1678 | struct stedma40_chan_cfg *info = data; | 
|  | 1679 | struct d40_chan *d40c = | 
|  | 1680 | container_of(chan, struct d40_chan, chan); | 
|  | 1681 | int err; | 
|  | 1682 |  | 
|  | 1683 | if (data) { | 
|  | 1684 | err = d40_validate_conf(d40c, info); | 
|  | 1685 | if (!err) | 
|  | 1686 | d40c->dma_cfg = *info; | 
|  | 1687 | } else | 
|  | 1688 | err = d40_config_memcpy(d40c); | 
|  | 1689 |  | 
|  | 1690 | return err == 0; | 
|  | 1691 | } | 
|  | 1692 | EXPORT_SYMBOL(stedma40_filter); | 
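|  |  |  | 
|  |  | /* | 
|  |  |  * Usage sketch for stedma40_filter(). Illustrative only; the cfg values | 
|  |  |  * shown are assumptions, not taken from this file: | 
|  |  |  * | 
|  |  |  *	dma_cap_mask_t mask; | 
|  |  |  *	struct stedma40_chan_cfg cfg = { .dir = STEDMA40_PERIPH_TO_MEM, }; | 
|  |  |  *	struct dma_chan *chan; | 
|  |  |  * | 
|  |  |  *	dma_cap_zero(mask); | 
|  |  |  *	dma_cap_set(DMA_SLAVE, mask); | 
|  |  |  *	chan = dma_request_channel(mask, stedma40_filter, &cfg); | 
|  |  |  * | 
|  |  |  * Passing NULL as data instead configures the channel for memcpy. | 
|  |  |  */ | 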
|  | 1693 |  | 
|  | 1694 | /* DMA ENGINE functions */ | 
|  | 1695 | static int d40_alloc_chan_resources(struct dma_chan *chan) | 
|  | 1696 | { | 
|  | 1697 | int err; | 
|  | 1698 | unsigned long flags; | 
|  | 1699 | struct d40_chan *d40c = | 
|  | 1700 | container_of(chan, struct d40_chan, chan); | 
|  | 1701 |  | 
|  | 1702 | spin_lock_irqsave(&d40c->lock, flags); | 
|  | 1703 |  | 
|  | 1704 | d40c->completed = chan->cookie = 1; | 
|  | 1705 |  | 
|  | 1706 | /* | 
|  | 1707 | * If no dma configuration is set (channel_type == 0) | 
|  | 1708 | * use default configuration | 
|  | 1709 | */ | 
|  | 1710 | if (d40c->dma_cfg.channel_type == 0) { | 
|  | 1711 | err = d40_config_memcpy(d40c); | 
|  | 1712 | if (err) | 
|  | 1713 | goto err_alloc; | 
|  | 1714 | } | 
|  | 1715 |  | 
|  | 1716 | err = d40_allocate_channel(d40c); | 
|  | 1717 | if (err) { | 
|  | 1718 | dev_err(&d40c->chan.dev->device, | 
|  | 1719 | "[%s] Failed to allocate channel\n", __func__); | 
|  | 1720 | goto err_alloc; | 
|  | 1721 | } | 
|  | 1722 |  | 
|  | 1723 | err = d40_config_chan(d40c, &d40c->dma_cfg); | 
|  | 1724 | if (err) { | 
|  | 1725 | dev_err(&d40c->chan.dev->device, | 
|  | 1726 | "[%s] Failed to configure channel\n", | 
|  | 1727 | __func__); | 
|  | 1728 | goto err_config; | 
|  | 1729 | } | 
|  | 1730 |  | 
|  | 1731 | spin_unlock_irqrestore(&d40c->lock, flags); | 
|  | 1732 | return 0; | 
|  | 1733 |  | 
|  | 1734 | err_config: | 
|  | 1735 | (void) d40_free_dma(d40c); | 
|  | 1736 | err_alloc: | 
|  | 1737 | spin_unlock_irqrestore(&d40c->lock, flags); | 
|  | 1738 | dev_err(&d40c->chan.dev->device, | 
|  | 1739 | "[%s] Channel allocation failed\n", __func__); | 
|  | 1740 | return -EINVAL; | 
|  | 1741 | } | 
|  | 1742 |  | 
|  | 1743 | static void d40_free_chan_resources(struct dma_chan *chan) | 
|  | 1744 | { | 
|  | 1745 | struct d40_chan *d40c = | 
|  | 1746 | container_of(chan, struct d40_chan, chan); | 
|  | 1747 | int err; | 
|  | 1748 | unsigned long flags; | 
|  | 1749 |  | 
|  | 1750 | spin_lock_irqsave(&d40c->lock, flags); | 
|  | 1751 |  | 
|  | 1752 | err = d40_free_dma(d40c); | 
|  | 1753 |  | 
|  | 1754 | if (err) | 
|  | 1755 | dev_err(&d40c->chan.dev->device, | 
|  | 1756 | "[%s] Failed to free channel\n", __func__); | 
|  | 1757 | spin_unlock_irqrestore(&d40c->lock, flags); | 
|  | 1758 | } | 
|  | 1759 |  | 
|  | 1760 | static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | 
|  | 1761 | dma_addr_t dst, | 
|  | 1762 | dma_addr_t src, | 
|  | 1763 | size_t size, | 
|  | 1764 | unsigned long flags) | 
|  | 1765 | { | 
|  | 1766 | struct d40_desc *d40d; | 
|  | 1767 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 
|  | 1768 | chan); | 
|  | 1769 | unsigned long flg; | 
|  | 1770 | int err = 0; | 
|  | 1771 |  | 
|  | 1772 | spin_lock_irqsave(&d40c->lock, flg); | 
|  | 1773 | d40d = d40_desc_get(d40c); | 
|  | 1774 |  | 
|  | 1775 | if (d40d == NULL) { | 
|  | 1776 | dev_err(&d40c->chan.dev->device, | 
|  | 1777 | "[%s] Descriptor is NULL\n", __func__); | 
|  | 1778 | goto err; | 
|  | 1779 | } | 
|  | 1780 |  | 
|  | 1781 | memset(d40d, 0, sizeof(struct d40_desc)); | 
|  | 1782 |  | 
|  | 1783 | d40d->txd.flags = flags; | 
|  | 1784 |  | 
|  | 1785 | dma_async_tx_descriptor_init(&d40d->txd, chan); | 
|  | 1786 |  | 
|  | 1787 | d40d->txd.tx_submit = d40_tx_submit; | 
|  | 1788 |  | 
|  | 1789 | if (d40c->log_num != D40_PHY_CHAN) { | 
|  | 1790 |  | 
|  | 1791 | if (d40_pool_lli_alloc(d40d, 1, true) < 0) { | 
|  | 1792 | dev_err(&d40c->chan.dev->device, | 
|  | 1793 | "[%s] Out of memory\n", __func__); | 
|  | 1794 | goto err; | 
|  | 1795 | } | 
|  | 1796 | d40d->lli_len = 1; | 
|  | 1797 |  | 
|  | 1798 | d40_log_fill_lli(d40d->lli_log.src, | 
|  | 1799 | src, | 
|  | 1800 | size, | 
|  | 1801 | 0, | 
|  | 1802 | d40c->log_def.lcsp1, | 
|  | 1803 | d40c->dma_cfg.src_info.data_width, | 
|  | 1804 | true, true); | 
|  | 1805 |  | 
|  | 1806 | d40_log_fill_lli(d40d->lli_log.dst, | 
|  | 1807 | dst, | 
|  | 1808 | size, | 
|  | 1809 | 0, | 
|  | 1810 | d40c->log_def.lcsp3, | 
|  | 1811 | d40c->dma_cfg.dst_info.data_width, | 
|  | 1812 | true, true); | 
|  | 1813 |  | 
|  | 1814 | } else { | 
|  | 1815 |  | 
|  | 1816 | if (d40_pool_lli_alloc(d40d, 1, false) < 0) { | 
|  | 1817 | dev_err(&d40c->chan.dev->device, | 
|  | 1818 | "[%s] Out of memory\n", __func__); | 
|  | 1819 | goto err; | 
|  | 1820 | } | 
|  | 1821 |  | 
|  | 1822 | err = d40_phy_fill_lli(d40d->lli_phy.src, | 
|  | 1823 | src, | 
|  | 1824 | size, | 
|  | 1825 | d40c->dma_cfg.src_info.psize, | 
|  | 1826 | 0, | 
|  | 1827 | d40c->src_def_cfg, | 
|  | 1828 | true, | 
|  | 1829 | d40c->dma_cfg.src_info.data_width, | 
|  | 1830 | false); | 
|  | 1831 | if (err) | 
|  | 1832 | goto err_fill_lli; | 
|  | 1833 |  | 
|  | 1834 | err = d40_phy_fill_lli(d40d->lli_phy.dst, | 
|  | 1835 | dst, | 
|  | 1836 | size, | 
|  | 1837 | d40c->dma_cfg.dst_info.psize, | 
|  | 1838 | 0, | 
|  | 1839 | d40c->dst_def_cfg, | 
|  | 1840 | true, | 
|  | 1841 | d40c->dma_cfg.dst_info.data_width, | 
|  | 1842 | false); | 
|  | 1843 |  | 
|  | 1844 | if (err) | 
|  | 1845 | goto err_fill_lli; | 
|  | 1846 |  | 
|  | 1847 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | 
|  | 1848 | d40d->lli_pool.size, DMA_TO_DEVICE); | 
|  | 1849 | } | 
|  | 1850 |  | 
|  | 1851 | spin_unlock_irqrestore(&d40c->lock, flg); | 
|  | 1852 | return &d40d->txd; | 
|  | 1853 |  | 
|  | 1854 | err_fill_lli: | 
|  | 1855 | dev_err(&d40c->chan.dev->device, | 
|  | 1856 | "[%s] Failed filling in PHY LLI\n", __func__); | 
|  | 1857 | d40_pool_lli_free(d40d); | 
|  | 1858 | err: | 
|  | 1859 | spin_unlock_irqrestore(&d40c->lock, flg); | 
|  | 1860 | return NULL; | 
|  | 1861 | } | 
|  | 1862 |  | 
|  | 1863 | static int d40_prep_slave_sg_log(struct d40_desc *d40d, | 
|  | 1864 | struct d40_chan *d40c, | 
|  | 1865 | struct scatterlist *sgl, | 
|  | 1866 | unsigned int sg_len, | 
|  | 1867 | enum dma_data_direction direction, | 
|  | 1868 | unsigned long flags) | 
|  | 1869 | { | 
|  | 1870 | dma_addr_t dev_addr = 0; | 
|  | 1871 | int total_size; | 
|  | 1872 | int lli_max = d40c->base->plat_data->llis_per_log; | 
|  | 1873 |  | 
|  | 1874 | if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { | 
|  | 1875 | dev_err(&d40c->chan.dev->device, | 
|  | 1876 | "[%s] Out of memory\n", __func__); | 
|  | 1877 | return -ENOMEM; | 
|  | 1878 | } | 
|  | 1879 |  | 
|  | 1880 | d40d->lli_len = sg_len; | 
|  | 1881 | d40d->lli_tcount = 0; | 
|  | 1882 |  | 
|  | 1883 | /* | 
|  | 1884 |  * Check if there is space available in lcla. | 
|  | 1885 |  * If not, split the list into 1-length chunks and run | 
|  | 1886 |  * only in lcpa space. | 
|  | 1887 |  */ | 
|  | 1888 | if (sg_len > 1 && | 
|  | 1889 |     d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0) | 
|  | 1890 | lli_max = 1; | 
|  | 1891 |  | 
|  | 1892 | if (direction == DMA_FROM_DEVICE) { | 
|  | 1893 | dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; | 
|  | 1894 | total_size = d40_log_sg_to_dev(&d40c->lcla, | 
|  | 1895 | sgl, sg_len, | 
|  | 1896 | &d40d->lli_log, | 
|  | 1897 | &d40c->log_def, | 
|  | 1898 | d40c->dma_cfg.src_info.data_width, | 
|  | 1899 | d40c->dma_cfg.dst_info.data_width, | 
|  | 1900 | direction, | 
|  | 1901 | flags & DMA_PREP_INTERRUPT, | 
|  | 1902 | dev_addr, lli_max, | 
|  | 1903 | d40c->base->plat_data->llis_per_log); | 
|  | 1904 | } else if (direction == DMA_TO_DEVICE) { | 
|  | 1905 | dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; | 
|  | 1906 | total_size = d40_log_sg_to_dev(&d40c->lcla, | 
|  | 1907 | sgl, sg_len, | 
|  | 1908 | &d40d->lli_log, | 
|  | 1909 | &d40c->log_def, | 
|  | 1910 | d40c->dma_cfg.src_info.data_width, | 
|  | 1911 | d40c->dma_cfg.dst_info.data_width, | 
|  | 1912 | direction, | 
|  | 1913 | flags & DMA_PREP_INTERRUPT, | 
|  | 1914 | dev_addr, lli_max, | 
|  | 1915 | d40c->base->plat_data->llis_per_log); | 
|  | 1916 | } else | 
|  | 1917 | return -EINVAL; | 
|  | 1918 | if (total_size < 0) | 
|  | 1919 | return -EINVAL; | 
|  | 1920 |  | 
|  | 1921 | return 0; | 
|  | 1922 | } | 
|  | 1923 |  | 
|  | 1924 | static int d40_prep_slave_sg_phy(struct d40_desc *d40d, | 
|  | 1925 | struct d40_chan *d40c, | 
|  | 1926 | struct scatterlist *sgl, | 
|  | 1927 | unsigned int sgl_len, | 
|  | 1928 | enum dma_data_direction direction, | 
|  | 1929 | unsigned long flags) | 
|  | 1930 | { | 
|  | 1931 | dma_addr_t src_dev_addr; | 
|  | 1932 | dma_addr_t dst_dev_addr; | 
|  | 1933 | int res; | 
|  | 1934 |  | 
|  | 1935 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { | 
|  | 1936 | dev_err(&d40c->chan.dev->device, | 
|  | 1937 | "[%s] Out of memory\n", __func__); | 
|  | 1938 | return -ENOMEM; | 
|  | 1939 | } | 
|  | 1940 |  | 
|  | 1941 | d40d->lli_len = sgl_len; | 
|  | 1942 | d40d->lli_tcount = 0; | 
|  | 1943 |  | 
|  | 1944 | if (direction == DMA_FROM_DEVICE) { | 
|  | 1945 | dst_dev_addr = 0; | 
|  | 1946 | src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; | 
|  | 1947 | } else if (direction == DMA_TO_DEVICE) { | 
|  | 1948 | dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; | 
|  | 1949 | src_dev_addr = 0; | 
|  | 1950 | } else | 
|  | 1951 | return -EINVAL; | 
|  | 1952 |  | 
|  | 1953 | res = d40_phy_sg_to_lli(sgl, | 
|  | 1954 | sgl_len, | 
|  | 1955 | src_dev_addr, | 
|  | 1956 | d40d->lli_phy.src, | 
|  | 1957 | d40d->lli_phy.src_addr, | 
|  | 1958 | d40c->src_def_cfg, | 
|  | 1959 | d40c->dma_cfg.src_info.data_width, | 
|  | 1960 | d40c->dma_cfg.src_info.psize, | 
|  | 1961 | true); | 
|  | 1962 | if (res < 0) | 
|  | 1963 | return res; | 
|  | 1964 |  | 
|  | 1965 | res = d40_phy_sg_to_lli(sgl, | 
|  | 1966 | sgl_len, | 
|  | 1967 | dst_dev_addr, | 
|  | 1968 | d40d->lli_phy.dst, | 
|  | 1969 | d40d->lli_phy.dst_addr, | 
|  | 1970 | d40c->dst_def_cfg, | 
|  | 1971 | d40c->dma_cfg.dst_info.data_width, | 
|  | 1972 | d40c->dma_cfg.dst_info.psize, | 
|  | 1973 | true); | 
|  | 1974 | if (res < 0) | 
|  | 1975 | return res; | 
|  | 1976 |  | 
|  | 1977 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | 
|  | 1978 | d40d->lli_pool.size, DMA_TO_DEVICE); | 
|  | 1979 | return 0; | 
|  | 1980 | } | 
|  | 1981 |  | 
|  | 1982 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | 
|  | 1983 | struct scatterlist *sgl, | 
|  | 1984 | unsigned int sg_len, | 
|  | 1985 | enum dma_data_direction direction, | 
|  | 1986 | unsigned long flags) | 
|  | 1987 | { | 
|  | 1988 | struct d40_desc *d40d; | 
|  | 1989 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 
|  | 1990 | chan); | 
|  | 1991 | unsigned long flg; | 
|  | 1992 | int err; | 
|  | 1993 |  | 
|  | 1994 | if (d40c->dma_cfg.pre_transfer) | 
|  | 1995 | d40c->dma_cfg.pre_transfer(chan, | 
|  | 1996 | d40c->dma_cfg.pre_transfer_data, | 
|  | 1997 | sg_dma_len(sgl)); | 
|  | 1998 |  | 
|  | 1999 | spin_lock_irqsave(&d40c->lock, flg); | 
|  | 2000 | d40d = d40_desc_get(d40c); | 
|  | 2001 | spin_unlock_irqrestore(&d40c->lock, flg); | 
|  | 2002 |  | 
|  | 2003 | if (d40d == NULL) | 
|  | 2004 | return NULL; | 
|  | 2005 |  | 
|  | 2006 | memset(d40d, 0, sizeof(struct d40_desc)); | 
|  | 2007 |  | 
|  | 2008 | if (d40c->log_num != D40_PHY_CHAN) | 
|  | 2009 | err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, | 
|  | 2010 | direction, flags); | 
|  | 2011 | else | 
|  | 2012 | err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, | 
|  | 2013 | direction, flags); | 
|  | 2014 | if (err) { | 
|  | 2015 | dev_err(&d40c->chan.dev->device, | 
|  | 2016 | "[%s] Failed to prepare %s slave sg job: %d\n", | 
|  | 2017 | __func__, | 
|  | 2018 | d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); | 
|  | 2019 | return NULL; | 
|  | 2020 | } | 
|  | 2021 |  | 
|  | 2022 | d40d->txd.flags = flags; | 
|  | 2023 |  | 
|  | 2024 | dma_async_tx_descriptor_init(&d40d->txd, chan); | 
|  | 2025 |  | 
|  | 2026 | d40d->txd.tx_submit = d40_tx_submit; | 
|  | 2027 |  | 
|  | 2028 | return &d40d->txd; | 
|  | 2029 | } | 
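|  |  |  | 
|  |  | /* | 
|  |  |  * Sketch of how a client would drive the slave path through the generic | 
|  |  |  * dmaengine hooks (illustrative; sgl is assumed to be set up and | 
|  |  |  * DMA-mapped by the caller): | 
|  |  |  * | 
|  |  |  *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len, | 
|  |  |  *						   DMA_TO_DEVICE, | 
|  |  |  *						   DMA_PREP_INTERRUPT); | 
|  |  |  *	if (desc) { | 
|  |  |  *		desc->tx_submit(desc); | 
|  |  |  *		chan->device->device_issue_pending(chan); | 
|  |  |  *	} | 
|  |  |  */ | 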
|  | 2030 |  | 
|  | 2031 | static enum dma_status d40_tx_status(struct dma_chan *chan, | 
|  | 2032 | dma_cookie_t cookie, | 
|  | 2033 | struct dma_tx_state *txstate) | 
|  | 2034 | { | 
|  | 2035 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 
|  | 2036 | dma_cookie_t last_used; | 
|  | 2037 | dma_cookie_t last_complete; | 
|  | 2038 | int ret; | 
|  | 2039 |  | 
|  | 2040 | last_complete = d40c->completed; | 
|  | 2041 | last_used = chan->cookie; | 
|  | 2042 |  | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 2043 | if (d40_is_paused(d40c)) | 
|  | 2044 | ret = DMA_PAUSED; | 
|  | 2045 | else | 
|  | 2046 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2047 |  | 
| Jonas Aaberg | a5ebca4 | 2010-05-18 00:41:09 +0200 | [diff] [blame] | 2048 | dma_set_tx_state(txstate, last_complete, last_used, | 
|  | 2049 | stedma40_residue(chan)); | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2050 |  | 
|  | 2051 | return ret; | 
|  | 2052 | } | 
|  | 2053 |  | 
|  | 2054 | static void d40_issue_pending(struct dma_chan *chan) | 
|  | 2055 | { | 
|  | 2056 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 
|  | 2057 | unsigned long flags; | 
|  | 2058 |  | 
|  | 2059 | spin_lock_irqsave(&d40c->lock, flags); | 
|  | 2060 |  | 
|  | 2061 | /* Busy means that pending jobs are already being processed */ | 
|  | 2062 | if (!d40c->busy) | 
|  | 2063 | (void) d40_queue_start(d40c); | 
|  | 2064 |  | 
|  | 2065 | spin_unlock_irqrestore(&d40c->lock, flags); | 
|  | 2066 | } | 
|  | 2067 |  | 
| Linus Walleij | 0582763 | 2010-05-17 16:30:42 -0700 | [diff] [blame] | 2068 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 
|  | 2069 | unsigned long arg) | 
| Linus Walleij | 8d318a5 | 2010-03-30 15:33:42 +0200 | [diff] [blame] | 2070 | { | 
|  | 2071 | unsigned long flags; | 
|  | 2072 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 
|  | 2073 |  | 
|  | 2074 | switch (cmd) { | 
|  | 2075 | case DMA_TERMINATE_ALL: | 
|  | 2076 | spin_lock_irqsave(&d40c->lock, flags); | 
|  | 2077 | d40_term_all(d40c); | 
|  | 2078 | spin_unlock_irqrestore(&d40c->lock, flags); | 
|  | 2079 | return 0; | 
|  | 2080 | case DMA_PAUSE: | 
|  | 2081 | return d40_pause(chan); | 
|  | 2082 | case DMA_RESUME: | 
|  | 2083 | return d40_resume(chan); | 
|  | 2084 | } | 
|  | 2085 |  | 
|  | 2086 | /* Other commands are unimplemented */ | 
|  | 2087 | return -ENXIO; | 
|  | 2088 | } | 
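|  |  |  | 
|  |  | /* | 
|  |  |  * Clients reach these commands through the generic device_control hook; | 
|  |  |  * a pause/resume cycle would look roughly like this (sketch): | 
|  |  |  * | 
|  |  |  *	chan->device->device_control(chan, DMA_PAUSE, 0); | 
|  |  |  *	... | 
|  |  |  *	chan->device->device_control(chan, DMA_RESUME, 0); | 
|  |  |  */ | 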
|  | 2089 |  | 
|  | 2090 | /* Initialization functions */ | 
|  | 2091 |  | 
|  | 2092 | static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | 
|  | 2093 | struct d40_chan *chans, int offset, | 
|  | 2094 | int num_chans) | 
|  | 2095 | { | 
|  | 2096 | int i = 0; | 
|  | 2097 | struct d40_chan *d40c; | 
|  | 2098 |  | 
|  | 2099 | INIT_LIST_HEAD(&dma->channels); | 
|  | 2100 |  | 
|  | 2101 | for (i = offset; i < offset + num_chans; i++) { | 
|  | 2102 | d40c = &chans[i]; | 
|  | 2103 | d40c->base = base; | 
|  | 2104 | d40c->chan.device = dma; | 
|  | 2105 |  | 
|  | 2106 | /* Invalidate lcla element */ | 
|  | 2107 | d40c->lcla.src_id = -1; | 
|  | 2108 | d40c->lcla.dst_id = -1; | 
|  | 2109 |  | 
|  | 2110 | spin_lock_init(&d40c->lock); | 
|  | 2111 |  | 
|  | 2112 | d40c->log_num = D40_PHY_CHAN; | 
|  | 2113 |  | 
|  | 2114 | INIT_LIST_HEAD(&d40c->free); | 
|  | 2115 | INIT_LIST_HEAD(&d40c->active); | 
|  | 2116 | INIT_LIST_HEAD(&d40c->queue); | 
|  | 2117 | INIT_LIST_HEAD(&d40c->client); | 
|  | 2118 |  | 
|  | 2119 | d40c->free_len = 0; | 
|  | 2120 |  | 
|  | 2121 | tasklet_init(&d40c->tasklet, dma_tasklet, | 
|  | 2122 | (unsigned long) d40c); | 
|  | 2123 |  | 
|  | 2124 | list_add_tail(&d40c->chan.device_node, | 
|  | 2125 | &dma->channels); | 
|  | 2126 | } | 
|  | 2127 | } | 
|  | 2128 |  | 
|  | 2129 | static int __init d40_dmaengine_init(struct d40_base *base, | 
|  | 2130 | int num_reserved_chans) | 
|  | 2131 | { | 
|  | 2132 | int err; | 
|  | 2133 |  | 
|  | 2134 | d40_chan_init(base, &base->dma_slave, base->log_chans, | 
|  | 2135 | 0, base->num_log_chans); | 
|  | 2136 |  | 
|  | 2137 | dma_cap_zero(base->dma_slave.cap_mask); | 
|  | 2138 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); | 
|  | 2139 |  | 
|  | 2140 | base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; | 
|  | 2141 | base->dma_slave.device_free_chan_resources = d40_free_chan_resources; | 
|  | 2142 | base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; | 
|  | 2143 | base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; | 
|  | 2144 | base->dma_slave.device_tx_status = d40_tx_status; | 
|  | 2145 | base->dma_slave.device_issue_pending = d40_issue_pending; | 
|  | 2146 | base->dma_slave.device_control = d40_control; | 
|  | 2147 | base->dma_slave.dev = base->dev; | 
|  | 2148 |  | 
|  | 2149 | err = dma_async_device_register(&base->dma_slave); | 
|  | 2150 |  | 
|  | 2151 | if (err) { | 
|  | 2152 | dev_err(base->dev, | 
|  | 2153 | "[%s] Failed to register slave channels\n", | 
|  | 2154 | __func__); | 
|  | 2155 | goto failure1; | 
|  | 2156 | } | 
|  | 2157 |  | 
|  | 2158 | d40_chan_init(base, &base->dma_memcpy, base->log_chans, | 
|  | 2159 | base->num_log_chans, base->plat_data->memcpy_len); | 
|  | 2160 |  | 
|  | 2161 | dma_cap_zero(base->dma_memcpy.cap_mask); | 
|  | 2162 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); | 
|  | 2163 |  | 
|  | 2164 | base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; | 
|  | 2165 | base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; | 
|  | 2166 | base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; | 
|  | 2167 | base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; | 
|  | 2168 | base->dma_memcpy.device_tx_status = d40_tx_status; | 
|  | 2169 | base->dma_memcpy.device_issue_pending = d40_issue_pending; | 
|  | 2170 | base->dma_memcpy.device_control = d40_control; | 
|  | 2171 | base->dma_memcpy.dev = base->dev; | 
|  | 2172 | /* | 
|  | 2173 |  * This controller can only access addresses at even 32-bit | 
|  | 2174 |  * boundaries, i.e. with an alignment of 2^2 = 4 bytes. | 
|  | 2175 |  */ | 
|  | 2176 | base->dma_memcpy.copy_align = 2; | 
|  | 2177 |  | 
|  | 2178 | err = dma_async_device_register(&base->dma_memcpy); | 
|  | 2179 |  | 
|  | 2180 | if (err) { | 
|  | 2181 | dev_err(base->dev, | 
|  | 2182 | "[%s] Failed to register memcpy only channels\n", | 
|  | 2183 | __func__); | 
|  | 2184 | goto failure2; | 
|  | 2185 | } | 
|  | 2186 |  | 
|  | 2187 | d40_chan_init(base, &base->dma_both, base->phy_chans, | 
|  | 2188 | 0, num_reserved_chans); | 
|  | 2189 |  | 
|  | 2190 | dma_cap_zero(base->dma_both.cap_mask); | 
|  | 2191 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); | 
|  | 2192 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); | 
|  | 2193 |  | 
|  | 2194 | base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; | 
|  | 2195 | base->dma_both.device_free_chan_resources = d40_free_chan_resources; | 
|  | 2196 | base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; | 
|  | 2197 | base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; | 
|  | 2198 | base->dma_both.device_tx_status = d40_tx_status; | 
|  | 2199 | base->dma_both.device_issue_pending = d40_issue_pending; | 
|  | 2200 | base->dma_both.device_control = d40_control; | 
|  | 2201 | base->dma_both.dev = base->dev; | 
|  | 2202 | base->dma_both.copy_align = 2; | 
|  | 2203 | err = dma_async_device_register(&base->dma_both); | 
|  | 2204 |  | 
|  | 2205 | if (err) { | 
|  | 2206 | dev_err(base->dev, | 
|  | 2207 | "[%s] Failed to register logical and physical capable channels\n", | 
|  | 2208 | __func__); | 
|  | 2209 | goto failure3; | 
|  | 2210 | } | 
|  | 2211 | return 0; | 
|  | 2212 | failure3: | 
|  | 2213 | dma_async_device_unregister(&base->dma_memcpy); | 
|  | 2214 | failure2: | 
|  | 2215 | dma_async_device_unregister(&base->dma_slave); | 
|  | 2216 | failure1: | 
|  | 2217 | return err; | 
|  | 2218 | } | 
|  | 2219 |  | 
|  | 2222 | static int __init d40_phy_res_init(struct d40_base *base) | 
|  | 2223 | { | 
|  | 2224 | int i; | 
|  | 2225 | int num_phy_chans_avail = 0; | 
|  | 2226 | u32 val[2]; | 
|  | 2227 | int odd_even_bit = -2; | 
|  | 2228 |  | 
|  | 2229 | val[0] = readl(base->virtbase + D40_DREG_PRSME); | 
|  | 2230 | val[1] = readl(base->virtbase + D40_DREG_PRSMO); | 
|  | 2231 |  | 
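|  |  | /* | 
|  |  |  * Two bits per physical channel: even-numbered channels live in PRSME, | 
|  |  |  * odd-numbered ones in PRSMO. odd_even_bit advances by two on every | 
|  |  |  * even channel so that (val[i % 2] >> odd_even_bit) & 3 picks out the | 
|  |  |  * right field. | 
|  |  |  */ | 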
|  | 2232 | for (i = 0; i < base->num_phy_chans; i++) { | 
|  | 2233 | base->phy_res[i].num = i; | 
|  | 2234 | odd_even_bit += 2 * ((i % 2) == 0); | 
|  | 2235 | if (((val[i % 2] >> odd_even_bit) & 3) == 1) { | 
|  | 2236 | /* Mark security only channels as occupied */ | 
|  | 2237 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; | 
|  | 2238 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; | 
|  | 2239 | } else { | 
|  | 2240 | base->phy_res[i].allocated_src = D40_ALLOC_FREE; | 
|  | 2241 | base->phy_res[i].allocated_dst = D40_ALLOC_FREE; | 
|  | 2242 | num_phy_chans_avail++; | 
|  | 2243 | } | 
|  | 2244 | spin_lock_init(&base->phy_res[i].lock); | 
|  | 2245 | } | 
|  | 2246 | dev_info(base->dev, "%d of %d physical DMA channels available\n", | 
|  | 2247 | num_phy_chans_avail, base->num_phy_chans); | 
|  | 2248 |  | 
|  | 2249 | /* Verify settings extended vs standard */ | 
|  | 2250 | val[0] = readl(base->virtbase + D40_DREG_PRTYP); | 
|  | 2251 |  | 
|  | 2252 | for (i = 0; i < base->num_phy_chans; i++) { | 
|  | 2253 |  | 
|  | 2254 | if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && | 
|  | 2255 | (val[0] & 0x3) != 1) | 
|  | 2256 | dev_info(base->dev, | 
|  | 2257 | "[%s] INFO: channel %d is misconfigured (%d)\n", | 
|  | 2258 | __func__, i, val[0] & 0x3); | 
|  | 2259 |  | 
|  | 2260 | val[0] = val[0] >> 2; | 
|  | 2261 | } | 
|  | 2262 |  | 
|  | 2263 | return num_phy_chans_avail; | 
|  | 2264 | } | 
|  | 2265 |  | 
|  | 2266 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | 
|  | 2267 | { | 
|  | 2268 | static const struct d40_reg_val dma_id_regs[] = { | 
|  | 2269 | /* Peripheral Id */ | 
|  | 2270 | { .reg = D40_DREG_PERIPHID0, .val = 0x0040}, | 
|  | 2271 | { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, | 
|  | 2272 | /* | 
|  | 2273 | * D40_DREG_PERIPHID2 Depends on HW revision: | 
|  | 2274 | *  MOP500/HREF ED has 0x0008, | 
|  | 2275 | *  ? has 0x0018, | 
|  | 2276 | *  HREF V1 has 0x0028 | 
|  | 2277 | */ | 
|  | 2278 | { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, | 
|  | 2279 |  | 
|  | 2280 | /* PCell Id */ | 
|  | 2281 | { .reg = D40_DREG_CELLID0, .val = 0x000d}, | 
|  | 2282 | { .reg = D40_DREG_CELLID1, .val = 0x00f0}, | 
|  | 2283 | { .reg = D40_DREG_CELLID2, .val = 0x0005}, | 
|  | 2284 | { .reg = D40_DREG_CELLID3, .val = 0x00b1} | 
|  | 2285 | }; | 
|  | 2286 | struct stedma40_platform_data *plat_data; | 
|  | 2287 | struct clk *clk = NULL; | 
|  | 2288 | void __iomem *virtbase = NULL; | 
|  | 2289 | struct resource *res = NULL; | 
|  | 2290 | struct d40_base *base = NULL; | 
|  | 2291 | int num_log_chans = 0; | 
|  | 2292 | int num_phy_chans; | 
|  | 2293 | int i; | 
|  | 2294 |  | 
|  | 2295 | clk = clk_get(&pdev->dev, NULL); | 
|  | 2296 |  | 
|  | 2297 | if (IS_ERR(clk)) { | 
|  | 2298 | dev_err(&pdev->dev, "[%s] No matching clock found\n", | 
|  | 2299 | __func__); | 
|  | 2300 | goto failure; | 
|  | 2301 | } | 
|  | 2302 |  | 
|  | 2303 | clk_enable(clk); | 
|  | 2304 |  | 
|  | 2305 | /* Get IO for DMAC base address */ | 
|  | 2306 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); | 
|  | 2307 | if (!res) | 
|  | 2308 | goto failure; | 
|  | 2309 |  | 
|  | 2310 | if (request_mem_region(res->start, resource_size(res), | 
|  | 2311 | D40_NAME " I/O base") == NULL) | 
|  | 2312 | goto failure; | 
|  | 2313 |  | 
|  | 2314 | virtbase = ioremap(res->start, resource_size(res)); | 
|  | 2315 | if (!virtbase) | 
|  | 2316 | goto failure; | 
|  | 2317 |  | 
|  | 2318 | /* HW version check */ | 
|  | 2319 | for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { | 
|  | 2320 | if (dma_id_regs[i].val != | 
|  | 2321 | readl(virtbase + dma_id_regs[i].reg)) { | 
|  | 2322 | dev_err(&pdev->dev, | 
|  | 2323 | "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", | 
|  | 2324 | __func__, | 
|  | 2325 | dma_id_regs[i].val, | 
|  | 2326 | dma_id_regs[i].reg, | 
|  | 2327 | readl(virtbase + dma_id_regs[i].reg)); | 
|  | 2328 | goto failure; | 
|  | 2329 | } | 
|  | 2330 | } | 
|  | 2331 |  | 
|  | 2332 | i = readl(virtbase + D40_DREG_PERIPHID2); | 
|  | 2333 |  | 
|  | 2334 | if ((i & 0xf) != D40_PERIPHID2_DESIGNER) { | 
|  | 2335 | dev_err(&pdev->dev, | 
|  | 2336 | "[%s] Unknown designer! Got %x wanted %x\n", | 
|  | 2337 | __func__, i & 0xf, D40_PERIPHID2_DESIGNER); | 
|  | 2338 | goto failure; | 
|  | 2339 | } | 
|  | 2340 |  | 
|  | 2341 | /* The number of physical channels on this HW */ | 
|  | 2342 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | 
|  | 2343 |  | 
|  | 2344 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", | 
|  | 2345 | (i >> 4) & 0xf, res->start); | 
|  | 2346 |  | 
|  | 2347 | plat_data = pdev->dev.platform_data; | 
|  | 2348 |  | 
|  | 2349 | /* Count the number of logical channels in use */ | 
|  | 2350 | for (i = 0; i < plat_data->dev_len; i++) | 
|  | 2351 | if (plat_data->dev_rx[i] != 0) | 
|  | 2352 | num_log_chans++; | 
|  | 2353 |  | 
|  | 2354 | for (i = 0; i < plat_data->dev_len; i++) | 
|  | 2355 | if (plat_data->dev_tx[i] != 0) | 
|  | 2356 | num_log_chans++; | 
|  | 2357 |  | 
|  | 2358 | base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + | 
|  | 2359 | (num_phy_chans + num_log_chans + plat_data->memcpy_len) * | 
|  | 2360 | sizeof(struct d40_chan), GFP_KERNEL); | 
|  | 2361 |  | 
|  | 2362 | if (base == NULL) { | 
|  | 2363 | dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); | 
|  | 2364 | goto failure; | 
|  | 2365 | } | 
|  | 2366 |  | 
|  | 2367 | base->clk = clk; | 
|  | 2368 | base->num_phy_chans = num_phy_chans; | 
|  | 2369 | base->num_log_chans = num_log_chans; | 
|  | 2370 | base->phy_start = res->start; | 
|  | 2371 | base->phy_size = resource_size(res); | 
|  | 2372 | base->virtbase = virtbase; | 
|  | 2373 | base->plat_data = plat_data; | 
|  | 2374 | base->dev = &pdev->dev; | 
|  | 2375 | base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); | 
|  | 2376 | base->log_chans = &base->phy_chans[num_phy_chans]; | 
|  | 2377 |  | 
|  | 2378 | base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), | 
|  | 2379 | GFP_KERNEL); | 
|  | 2380 | if (!base->phy_res) | 
|  | 2381 | goto failure; | 
|  | 2382 |  | 
|  | 2383 | base->lookup_phy_chans = kzalloc(num_phy_chans * | 
|  | 2384 | sizeof(struct d40_chan *), | 
|  | 2385 | GFP_KERNEL); | 
|  | 2386 | if (!base->lookup_phy_chans) | 
|  | 2387 | goto failure; | 
|  | 2388 |  | 
|  | 2389 | if (num_log_chans + plat_data->memcpy_len) { | 
|  | 2390 | /* | 
|  | 2391 |  * The maximum number of logical channels equals the number of | 
|  | 2392 |  * event lines for all src and dst devices combined. | 
|  | 2393 |  */ | 
|  | 2394 | base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 * | 
|  | 2395 | sizeof(struct d40_chan *), | 
|  | 2396 | GFP_KERNEL); | 
|  | 2397 | if (!base->lookup_log_chans) | 
|  | 2398 | goto failure; | 
|  | 2399 | } | 
|  | 2400 | base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32), | 
|  | 2401 | GFP_KERNEL); | 
|  | 2402 | if (!base->lcla_pool.alloc_map) | 
|  | 2403 | goto failure; | 
|  | 2404 |  | 
|  | 2405 | return base; | 
|  | 2406 |  | 
|  | 2407 | failure: | 
|  | 2408 | if (clk) { | 
|  | 2409 | clk_disable(clk); | 
|  | 2410 | clk_put(clk); | 
|  | 2411 | } | 
|  | 2412 | if (virtbase) | 
|  | 2413 | iounmap(virtbase); | 
|  | 2414 | if (res) | 
|  | 2415 | release_mem_region(res->start, | 
|  | 2416 | resource_size(res)); | 
|  | 2419 |  | 
|  | 2420 | if (base) { | 
|  | 2421 | kfree(base->lcla_pool.alloc_map); | 
|  | 2422 | kfree(base->lookup_log_chans); | 
|  | 2423 | kfree(base->lookup_phy_chans); | 
|  | 2424 | kfree(base->phy_res); | 
|  | 2425 | kfree(base); | 
|  | 2426 | } | 
|  | 2427 |  | 
|  | 2428 | return NULL; | 
|  | 2429 | } | 
|  | 2430 |  | 
|  | 2431 | static void __init d40_hw_init(struct d40_base *base) | 
|  | 2432 | { | 
|  | 2434 | static const struct d40_reg_val dma_init_reg[] = { | 
|  | 2435 | /* Clock every part of the DMA block from start */ | 
|  | 2436 | { .reg = D40_DREG_GCC,    .val = 0x0000ff01}, | 
|  | 2437 |  | 
|  | 2438 | /* Interrupts on all logical channels */ | 
|  | 2439 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, | 
|  | 2440 | { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, | 
|  | 2441 | { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, | 
|  | 2442 | { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, | 
|  | 2443 | { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, | 
|  | 2444 | { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, | 
|  | 2445 | { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, | 
|  | 2446 | { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, | 
|  | 2447 | { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, | 
|  | 2448 | { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, | 
|  | 2449 | { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, | 
|  | 2450 | { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} | 
|  | 2451 | }; | 
|  | 2452 | int i; | 
|  | 2453 | u32 prmseo[2] = {0, 0}; | 
|  | 2454 | u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; | 
|  | 2455 | u32 pcmis = 0; | 
|  | 2456 | u32 pcicr = 0; | 
|  | 2457 |  | 
|  | 2458 | for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++) | 
|  | 2459 | writel(dma_init_reg[i].val, | 
|  | 2460 | base->virtbase + dma_init_reg[i].reg); | 
|  | 2461 |  | 
|  | 2462 | /* Configure all our dma channels to default settings */ | 
|  | 2463 | for (i = 0; i < base->num_phy_chans; i++) { | 
|  | 2464 |  | 
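|  |  | /* | 
|  |  |  * The PRMSE/PRMSO and ACTIVE/ACTIVO values are built two bits per | 
|  |  |  * channel, starting with the highest channel number, so each pass | 
|  |  |  * shifts the accumulator left by two before ORing in the setting | 
|  |  |  * for channel num_phy_chans - i - 1. | 
|  |  |  */ | 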
|  | 2465 | activeo[i % 2] = activeo[i % 2] << 2; | 
|  | 2466 |  | 
|  | 2467 | if (base->phy_res[base->num_phy_chans - i - 1].allocated_src | 
|  | 2468 | == D40_ALLOC_PHY) { | 
|  | 2469 | activeo[i % 2] |= 3; | 
|  | 2470 | continue; | 
|  | 2471 | } | 
|  | 2472 |  | 
|  | 2473 | /* Enable interrupt # */ | 
|  | 2474 | pcmis = (pcmis << 1) | 1; | 
|  | 2475 |  | 
|  | 2476 | /* Clear interrupt # */ | 
|  | 2477 | pcicr = (pcicr << 1) | 1; | 
|  | 2478 |  | 
|  | 2479 | /* Set channel to physical mode */ | 
|  | 2480 | prmseo[i % 2] = prmseo[i % 2] << 2; | 
|  | 2481 | prmseo[i % 2] |= 1; | 
|  | 2482 |  | 
|  | 2483 | } | 
|  | 2484 |  | 
|  | 2485 | writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); | 
|  | 2486 | writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); | 
|  | 2487 | writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); | 
|  | 2488 | writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); | 
|  | 2489 |  | 
|  | 2490 | /* Write which interrupt to enable */ | 
|  | 2491 | writel(pcmis, base->virtbase + D40_DREG_PCMIS); | 
|  | 2492 |  | 
|  | 2493 | /* Write which interrupt to clear */ | 
|  | 2494 | writel(pcicr, base->virtbase + D40_DREG_PCICR); | 
|  | 2496 | } | 
|  | 2497 |  | 
|  | 2498 | static int __init d40_probe(struct platform_device *pdev) | 
|  | 2499 | { | 
|  | 2500 | int err; | 
|  | 2501 | int ret = -ENOENT; | 
|  | 2502 | struct d40_base *base; | 
|  | 2503 | struct resource *res = NULL; | 
|  | 2504 | int num_reserved_chans; | 
|  | 2505 | u32 val; | 
|  | 2506 |  | 
|  | 2507 | base = d40_hw_detect_init(pdev); | 
|  | 2508 |  | 
|  | 2509 | if (!base) | 
|  | 2510 | goto failure; | 
|  | 2511 |  | 
|  | 2512 | num_reserved_chans = d40_phy_res_init(base); | 
|  | 2513 |  | 
|  | 2514 | platform_set_drvdata(pdev, base); | 
|  | 2515 |  | 
|  | 2516 | spin_lock_init(&base->interrupt_lock); | 
|  | 2517 | spin_lock_init(&base->execmd_lock); | 
|  | 2518 |  | 
|  | 2519 | /* Get IO for logical channel parameter address */ | 
|  | 2520 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); | 
|  | 2521 | if (!res) { | 
|  | 2522 | ret = -ENOENT; | 
|  | 2523 | dev_err(&pdev->dev, | 
|  | 2524 | "[%s] No \"lcpa\" memory resource\n", | 
|  | 2525 | __func__); | 
|  | 2526 | goto failure; | 
|  | 2527 | } | 
|  | 2528 | base->lcpa_size = resource_size(res); | 
|  | 2529 | base->phy_lcpa = res->start; | 
|  | 2530 |  | 
|  | 2531 | if (request_mem_region(res->start, resource_size(res), | 
|  | 2532 | D40_NAME " I/O lcpa") == NULL) { | 
|  | 2533 | ret = -EBUSY; | 
|  | 2534 | dev_err(&pdev->dev, | 
|  | 2535 | "[%s] Failed to request LCPA region 0x%x-0x%x\n", | 
|  | 2536 | __func__, res->start, res->end); | 
|  | 2537 | goto failure; | 
|  | 2538 | } | 
|  | 2539 |  | 
|  | 2540 | /* We make use of ESRAM memory for this. */ | 
|  | 2541 | val = readl(base->virtbase + D40_DREG_LCPA); | 
|  | 2542 | if (res->start != val && val != 0) { | 
|  | 2543 | dev_warn(&pdev->dev, | 
|  | 2544 | "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n", | 
|  | 2545 | __func__, val, res->start); | 
|  | 2546 | } else | 
|  | 2547 | writel(res->start, base->virtbase + D40_DREG_LCPA); | 
|  | 2548 |  | 
|  | 2549 | base->lcpa_base = ioremap(res->start, resource_size(res)); | 
|  | 2550 | if (!base->lcpa_base) { | 
|  | 2551 | ret = -ENOMEM; | 
|  | 2552 | dev_err(&pdev->dev, | 
|  | 2553 | "[%s] Failed to ioremap LCPA region\n", | 
|  | 2554 | __func__); | 
|  | 2555 | goto failure; | 
|  | 2556 | } | 
|  | 2557 | /* Get IO for logical channel link address */ | 
|  | 2558 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla"); | 
|  | 2559 | if (!res) { | 
|  | 2560 | ret = -ENOENT; | 
|  | 2561 | dev_err(&pdev->dev, | 
|  | 2562 | "[%s] No \"lcla\" resource defined\n", | 
|  | 2563 | __func__); | 
|  | 2564 | goto failure; | 
|  | 2565 | } | 
|  | 2566 |  | 
|  | 2567 | base->lcla_pool.base_size = resource_size(res); | 
|  | 2568 | base->lcla_pool.phy = res->start; | 
|  | 2569 |  | 
|  | 2570 | if (request_mem_region(res->start, resource_size(res), | 
|  | 2571 | D40_NAME " I/O lcla") == NULL) { | 
|  | 2572 | ret = -EBUSY; | 
|  | 2573 | dev_err(&pdev->dev, | 
|  | 2574 | "[%s] Failed to request LCLA region 0x%x-0x%x\n", | 
|  | 2575 | __func__, res->start, res->end); | 
|  | 2576 | goto failure; | 
|  | 2577 | } | 
|  | 2578 | val = readl(base->virtbase + D40_DREG_LCLA); | 
|  | 2579 | if (res->start != val && val != 0) { | 
|  | 2580 | dev_warn(&pdev->dev, | 
|  | 2581 | "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n", | 
|  | 2582 | __func__, val, res->start); | 
|  | 2583 | } else | 
|  | 2584 | writel(res->start, base->virtbase + D40_DREG_LCLA); | 
|  | 2585 |  | 
|  | 2586 | base->lcla_pool.base = ioremap(res->start, resource_size(res)); | 
|  | 2587 | if (!base->lcla_pool.base) { | 
|  | 2588 | ret = -ENOMEM; | 
|  | 2589 | dev_err(&pdev->dev, | 
|  | 2590 | "[%s] Failed to ioremap LCLA 0x%x-0x%x\n", | 
|  | 2591 | __func__, res->start, res->end); | 
|  | 2592 | goto failure; | 
|  | 2593 | } | 
|  | 2594 |  | 
|  | 2595 | spin_lock_init(&base->lcla_pool.lock); | 
|  | 2596 |  | 
|  | 2597 | base->lcla_pool.num_blocks = base->num_phy_chans; | 
|  | 2598 |  | 
|  | 2599 | base->irq = platform_get_irq(pdev, 0); | 
|  | 2600 |  | 
|  | 2601 | ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); | 
|  | 2602 |  | 
|  | 2603 | if (ret) { | 
|  | 2604 | dev_err(&pdev->dev, "[%s] Failed to request IRQ\n", __func__); | 
|  | 2605 | goto failure; | 
|  | 2606 | } | 
|  | 2607 |  | 
|  | 2608 | err = d40_dmaengine_init(base, num_reserved_chans); | 
|  | 2609 | if (err) | 
|  | 2610 | goto failure; | 
|  | 2611 |  | 
|  | 2612 | d40_hw_init(base); | 
|  | 2613 |  | 
|  | 2614 | dev_info(base->dev, "initialized\n"); | 
|  | 2615 | return 0; | 
|  | 2616 |  | 
|  | 2617 | failure: | 
|  | 2618 | if (base) { | 
|  | 2619 | if (base->virtbase) | 
|  | 2620 | iounmap(base->virtbase); | 
|  | 2621 | if (base->lcla_pool.phy) | 
|  | 2622 | release_mem_region(base->lcla_pool.phy, | 
|  | 2623 | base->lcla_pool.base_size); | 
|  | 2624 | if (base->phy_lcpa) | 
|  | 2625 | release_mem_region(base->phy_lcpa, | 
|  | 2626 | base->lcpa_size); | 
|  | 2627 | if (base->phy_start) | 
|  | 2628 | release_mem_region(base->phy_start, | 
|  | 2629 | base->phy_size); | 
|  | 2630 | if (base->clk) { | 
|  | 2631 | clk_disable(base->clk); | 
|  | 2632 | clk_put(base->clk); | 
|  | 2633 | } | 
|  | 2634 |  | 
|  | 2635 | kfree(base->lcla_pool.alloc_map); | 
|  | 2636 | kfree(base->lookup_log_chans); | 
|  | 2637 | kfree(base->lookup_phy_chans); | 
|  | 2638 | kfree(base->phy_res); | 
|  | 2639 | kfree(base); | 
|  | 2640 | } | 
|  | 2641 |  | 
|  | 2642 | dev_err(&pdev->dev, "[%s] probe failed\n", __func__); | 
|  | 2643 | return ret; | 
|  | 2644 | } | 
|  | 2645 |  | 
|  | 2646 | static struct platform_driver d40_driver = { | 
|  | 2647 | .driver = { | 
|  | 2648 | .owner = THIS_MODULE, | 
|  | 2649 | .name  = D40_NAME, | 
|  | 2650 | }, | 
|  | 2651 | }; | 
|  | 2652 |  | 
|  | 2653 | int __init stedma40_init(void) | 
|  | 2654 | { | 
|  | 2655 | return platform_driver_probe(&d40_driver, d40_probe); | 
|  | 2656 | } | 
|  | 2657 | arch_initcall(stedma40_init); |