/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/dma-mapping.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is > 0 it's a DMA request cookie, < 0 it's an error code
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1
#define DMA_MAX_COOKIE	INT_MAX

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

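/*
 * Example: a negative cookie from a ->tx_submit() call means the descriptor
 * was not queued, and such a cookie must not be handed to the
 * completion-polling helpers below.  A minimal sketch with a hypothetical
 * descriptor 'tx':
 *
 *	dma_cookie_t cookie = tx->tx_submit(tx);
 *
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 */
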
/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_SLAVE + 1)


/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *  control completion, and communicate status.
 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
 *  this transaction
 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
 *  acknowledges receipt, i.e. has a chance to establish any dependency
 *  chains
 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as a single
 * 	mapping (if not set, do the source dma-unmapping as a page)
 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as a
 * 	single mapping (if not set, do the destination dma-unmapping as a page)
 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
 *  sources that were the result of a previous operation; in the case of a PQ
 *  operation it continues the calculation with new sources
 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
 *  on the result of this operation
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
	DMA_PREP_PQ_DISABLE_P = (1 << 6),
	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
	DMA_PREP_CONTINUE = (1 << 8),
	DMA_PREP_FENCE = (1 << 9),
};
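
/*
 * Example: these flags are ORed together and passed to the device_prep_dma_*
 * callbacks.  A minimal sketch requesting a completion interrupt on a memcpy
 * whose buffers the client unmaps itself (the channel and addresses here are
 * hypothetical):
 *
 *	unsigned long flags = DMA_PREP_INTERRUPT |
 *			      DMA_COMPL_SKIP_SRC_UNMAP |
 *			      DMA_COMPL_SKIP_DEST_UNMAP;
 *	struct dma_async_tx_descriptor *tx =
 *		chan->device->device_prep_dma_memcpy(chan, dst, src,
 *						     len, flags);
 */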

/**
 * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
 * on a running channel.
 * @DMA_TERMINATE_ALL: terminate all ongoing transfers
 * @DMA_PAUSE: pause ongoing transfers
 * @DMA_RESUME: resume paused transfer
 */
enum dma_ctrl_cmd {
	DMA_TERMINATE_ALL,
	DMA_PAUSE,
	DMA_RESUME,
};

/**
 * enum sum_check_bits - bit positions of the sum_check_flags bitmask
 */
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};


/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

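/*
 * Example: a capability mask is always manipulated through the dma_cap_*
 * helpers defined later in this header.  A minimal sketch building a mask
 * that matches memcpy-capable channels:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 */
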
/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device which supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;
	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel,
 * a suitable channel is passed to this routine for further dispositioning
 * before being returned, where 'suitable' indicates a non-busy channel that
 * satisfies the given capability mask.  The filter returns 'true' to indicate
 * that the channel is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

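/*
 * Example: a filter typically matches a channel against a platform-specific
 * handle.  A minimal sketch, where the 'struct my_dma_slave' type and its
 * dma_dev member are hypothetical:
 *
 *	static bool my_filter(struct dma_chan *chan, void *filter_param)
 *	{
 *		struct my_dma_slave *slave = filter_param;
 *
 *		return chan->device->dev == slave->dma_dev;
 *	}
 */
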
typedef void (*dma_async_tx_callback)(void *dma_async_param);
/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 * 	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

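/*
 * Example: the usual descriptor life cycle is prepare, set the completion
 * callback, submit, then kick the engine.  A minimal sketch with a
 * hypothetical callback 'my_done' and callback argument 'my_arg':
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_done;
 *	tx->callback_param = &my_arg;
 *	cookie = tx->tx_submit(tx);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);
 */
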
#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif

/**
 * struct dma_tx_state - filled in to report the status of
 * a transfer.
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 */
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};

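/*
 * Example: drivers fill in a dma_tx_state from their device_tx_status
 * callback, and clients can poll it to track progress.  A minimal sketch:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = chan->device->device_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_debug("%u bytes remaining\n", state.residue);
 */
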
/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_control: manipulate all pending operations on a channel, returns
 *	zero or error code
 * @device_tx_status: poll for transaction completion, the optional
 *	txstate parameter can be supplied with a pointer to get a
 *	struct with auxiliary transfer status information, otherwise the call
 *	will just return a simple status code
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {

	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t  cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	u8 copy_align;
	u8 xor_align;
	u8 pq_align;
	u8 fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags);
	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
};

static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

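/*
 * Example: the *_align fields are shifts, so an alignment value of 2 means
 * offsets and length must be multiples of 1 << 2 = 4 bytes.  A minimal
 * sketch guarding a copy:
 *
 *	if (!is_dma_copy_aligned(chan->device, src_off, dst_off, len))
 *		return -EINVAL;
 */
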
static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}

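/*
 * Example: for an engine with dma_dev_to_maxpq() == 8 and no native
 * continuation support, a continued PQ operation (DMA_PREP_CONTINUE set,
 * P generation enabled) leaves 8 - 3 = 5 slots for new sources, while a
 * P-disabled continuation (DMA_PREP_PQ_DISABLE_P also set) leaves
 * 8 - 1 = 7.
 */
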
/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_NET_DMA
#define net_dmaengine_get()	dmaengine_get()
#define net_dmaengine_put()	dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif /* CONFIG_ASYNC_TX_DMA */

dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

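/*
 * Example: a client that has no dependent operations to attach should ack
 * the descriptor so the driver may recycle it.  A minimal sketch:
 *
 *	if (!async_tx_test_ack(tx))
 *		async_tx_ack(tx);
 */
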
#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask);	\
		(cap) < DMA_TX_TYPE_END;	\
		(cap) = next_dma_cap((cap), (mask)))

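/*
 * Example: walking the capabilities advertised by a device.  A minimal
 * sketch printing each set capability index:
 *
 *	enum dma_transaction_type cap;
 *
 *	for_each_dma_cap_mask(cap, device->cap_mask)
 *		pr_debug("capability %d supported\n", cap);
 */
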
/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple cookies
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}

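/*
 * Example: polling a batch of cookies against one hardware read.  A minimal
 * sketch, assuming 'cookies[]' holds n earlier submissions:
 *
 *	dma_cookie_t last, used;
 *	int i;
 *
 *	dma_async_is_tx_complete(chan, cookies[0], &last, &used);
 *	for (i = 0; i < n; i++)
 *		if (dma_async_is_complete(cookies[i], last, used) !=
 *		    DMA_SUCCESS)
 *			break;
 */
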
static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_SUCCESS;
}
static inline void dma_issue_pending_all(void)
{
	do { } while (0);
}
#endif

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
void dma_release_channel(struct dma_chan *chan);

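/*
 * Example: the typical private-channel allocation pattern pairs
 * dma_request_channel() with dma_release_channel().  A minimal sketch
 * reusing the hypothetical mask and filter shown earlier:
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_channel(mask, my_filter, &my_slave);
 *	if (!chan)
 *		return -ENODEV;
 *	...
 *	dma_release_channel(chan);
 */
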
/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

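/*
 * Example: a pin/copy/unpin sequence of the kind used by the net_dma
 * receive path.  A minimal sketch, assuming 'iov' describes a user buffer
 * of 'len' bytes and 'kdata' is the kernel source:
 *
 *	struct dma_pinned_list *pinned;
 *
 *	pinned = dma_pin_iovec_pages(iov, len);
 *	if (pinned)
 *		dma_memcpy_to_iovec(chan, iov, pinned, kdata, len);
 *	dma_unpin_iovec_pages(pinned);
 */
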
#endif /* DMAENGINE_H */