/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>
#include <linux/dma-mapping.h>

/**
 * enum dma_state - resource PNP/power management state
 * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
 * @DMA_RESOURCE_RESUME: DMA device returning to full power
 * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
 * @DMA_RESOURCE_REMOVED: DMA device removed from the system
 */
enum dma_state {
	DMA_RESOURCE_SUSPEND,
	DMA_RESOURCE_RESUME,
	DMA_RESOURCE_AVAILABLE,
	DMA_RESOURCE_REMOVED,
};

/**
 * enum dma_state_client - state of the channel in the client
 * @DMA_ACK: client would like to use, or was using this channel
 * @DMA_DUP: client has already seen this channel, or is not using this channel
 * @DMA_NAK: client does not want to see any more channels
 */
enum dma_state_client {
	DMA_ACK,
	DMA_DUP,
	DMA_NAK,
};

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
 */
typedef s32 dma_cookie_t;

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
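
/*
 * Illustrative sketch (not part of the original header): a caller can
 * test the cookie returned by a submit routine (declared later in this
 * file) before trusting it.  "chan", "dest", "src" and "len" are assumed
 * to exist in the caller:
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (dma_submit_error(cookie))
 *		return -ENOMEM;
 */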

/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ_XOR,
	DMA_DUAL_XOR,
	DMA_PQ_UPDATE,
	DMA_ZERO_SUM,
	DMA_PQ_ZERO_SUM,
	DMA_MEMSET,
	DMA_MEMCPY_CRC32C,
	DMA_INTERRUPT,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @refcount: local_t used for open-coded "bigref" counting
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	local_t refcount;
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @refcount: kref, used in "bigref" slow-mode
 * @slow_ref: indicates that the DMA channel is free
 * @rcu: the DMA channel's RCU head
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct device dev;

	struct kref refcount;
	int slow_ref;
	struct rcu_head rcu;

	struct list_head device_node;
	struct dma_chan_percpu *local;
};

#define to_dma_chan(p) container_of(p, struct dma_chan, dev)

void dma_chan_cleanup(struct kref *kref);

/* take a reference on @chan; the fast path bumps a per-cpu counter */
static inline void dma_chan_get(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_get(&chan->refcount);
	else {
		local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}

/* drop a reference on @chan; the slow path may free it via dma_chan_cleanup() */
static inline void dma_chan_put(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_put(&chan->refcount, dma_chan_cleanup);
	else {
		local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}

/*
 * typedef dma_event_callback - function pointer to a DMA event callback
 * For each channel added to the system this routine is called for each client.
 * If the client would like to use the channel it returns DMA_ACK to signal
 * (ack) the dmaengine core to take out a reference on the channel and its
 * corresponding device.  A client must not 'ack' an available channel more
 * than once.  When a channel is removed all clients are notified.  If a client
 * is using the channel it must 'ack' the removal.  A client must not 'ack' a
 * removed channel more than once.
 * @client: 'this' pointer for the client context
 * @chan: channel to be acted upon
 * @state: available or removed
 */
struct dma_client;
typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
		struct dma_chan *chan, enum dma_state state);

/**
 * struct dma_client - info on the entity making use of DMA services
 * @event_callback: func ptr to call when something happens
 * @cap_mask: only return channels that satisfy the requested capabilities;
 *	a value of zero corresponds to any capability
 * @global_node: list_head for global dma_client_list
 */
struct dma_client {
	dma_event_callback	event_callback;
	dma_cap_mask_t		cap_mask;
	struct list_head	global_node;
};
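
/*
 * Illustrative sketch (not part of the original header): a minimal client
 * following the ack protocol described above.  my_event(), my_client,
 * my_save_chan() and my_forget_chan() are hypothetical names;
 * dma_async_client_register() and dma_async_client_chan_request() are
 * declared later in this file.
 *
 *	static enum dma_state_client
 *	my_event(struct dma_client *client, struct dma_chan *chan,
 *		 enum dma_state state)
 *	{
 *		switch (state) {
 *		case DMA_RESOURCE_AVAILABLE:
 *			return my_save_chan(chan) ? DMA_ACK : DMA_DUP;
 *		case DMA_RESOURCE_REMOVED:
 *			return my_forget_chan(chan) ? DMA_ACK : DMA_DUP;
 *		default:
 *			return DMA_DUP;
 *		}
 *	}
 *
 *	static struct dma_client my_client = {
 *		.event_callback = my_event,
 *	};
 *
 *	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
 *	dma_async_client_register(&my_client);
 *	dma_async_client_chan_request(&my_client);
 */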

typedef void (*dma_async_tx_callback)(void *dma_async_param);
/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @ack: the descriptor can not be reused until the client acknowledges
 *	receipt, i.e. has had a chance to establish any dependency chains
 * @phys: physical address of the descriptor
 * @tx_list: driver common field for operations that require multiple
 *	descriptors
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @tx_set_dest: set a destination address in a hardware descriptor
 * @tx_set_src: set a source address in a hardware descriptor
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @depend_list: at completion this list of transactions is submitted
 * @depend_node: allow this transaction to be executed after another
 *	transaction has completed, possibly on another channel
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the dependency list
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	int ack;
	dma_addr_t phys;
	struct list_head tx_list;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	void (*tx_set_dest)(dma_addr_t addr,
		struct dma_async_tx_descriptor *tx, int index);
	void (*tx_set_src)(dma_addr_t addr,
		struct dma_async_tx_descriptor *tx, int index);
	dma_async_tx_callback callback;
	void *callback_param;
	struct list_head depend_list;
	struct list_head depend_node;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
};
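
/*
 * Illustrative sketch (not part of the original header) of the descriptor
 * life cycle implied by the fields above: prepare via a struct dma_device
 * method, program the addresses, optionally attach a callback, then
 * submit.  "chan", "dma_dest", "dma_src", "len", my_done() and my_ctx are
 * assumed; error handling is omitted, and int_en is set to raise a
 * completion interrupt for the callback:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, len, 1);
 *	tx->tx_set_src(dma_src, tx, 0);
 *	tx->tx_set_dest(dma_dest, tx, 0);
 *	tx->callback = my_done;
 *	tx->callback_param = my_ctx;
 *	cookie = tx->tx_submit(tx);
 */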

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @refcount: reference count
 * @done: IO completion struct
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_zero_sum: prepares a zero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_dependency_added: async_tx notifies the channel about new deps
 * @device_is_tx_complete: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {
	unsigned int chancnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t  cap_mask;
	int max_xor;

	struct kref refcount;
	struct completion done;

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, size_t len, int int_en);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, unsigned int src_cnt, size_t len,
		int int_en);
	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
		struct dma_chan *chan, unsigned int src_cnt, size_t len,
		u32 *result, int int_en);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, int value, size_t len, int int_en);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan);

	void (*device_dependency_added)(struct dma_chan *chan);
	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
			dma_cookie_t cookie, dma_cookie_t *last,
			dma_cookie_t *used);
	void (*device_issue_pending)(struct dma_chan *chan);
};

/* --- public DMA engine API --- */

void dma_async_client_register(struct dma_client *client);
void dma_async_client_unregister(struct dma_client *client);
void dma_async_client_chan_request(struct dma_client *client);
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);
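
/*
 * Illustrative sketch (not part of the original header): the memcpy
 * helpers above hide the descriptor details.  "chan", "dest", "src" and
 * "len" are assumed; on submit failure the sketch falls back to a CPU
 * copy:
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (dma_submit_error(cookie))
 *		memcpy(dest, src, len);
 *	else
 *		dma_async_memcpy_issue_pending(chan);
 */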

static inline void
async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->ack = 1;
}

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask);	\
		(cap) < DMA_TX_TYPE_END;	\
		(cap) = next_dma_cap((cap), (mask)))
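
/*
 * Illustrative sketch (not part of the original header): building a
 * capability mask and walking a device's advertised capabilities.
 * "device" is assumed to point to a registered struct dma_device:
 *
 *	dma_cap_mask_t mask = { };
 *	enum dma_transaction_type cap;
 *
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	dma_cap_set(DMA_XOR, mask);
 *
 *	for_each_dma_cap_mask(cap, device->cap_mask)
 *		if (dma_has_cap(cap, mask))
 *			pr_debug("wanted capability %d present\n", cap);
 */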

/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	return chan->device->device_is_tx_complete(chan, cookie, last, used);
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple cookies
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}
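
/*
 * Illustrative sketch (not part of the original header): checking several
 * cookies against a single hardware poll, as the comment above describes.
 * "chan", "first" and "last_cookie" are assumed:
 *
 *	dma_cookie_t done, used, c;
 *
 *	dma_async_memcpy_complete(chan, last_cookie, &done, &used);
 *	for (c = first; c <= last_cookie; c++)
 *		if (dma_async_is_complete(c, done, used) == DMA_IN_PROGRESS)
 *			break;
 */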

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);

/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);
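
/*
 * Illustrative sketch (not part of the original header): the intended
 * pin/copy/unpin sequence for the helpers above.  "chan", "iov", "kdata"
 * and "len" are assumed; waiting for completion (e.g. via dma_sync_wait())
 * is elided:
 *
 *	struct dma_pinned_list *pinned;
 *
 *	pinned = dma_pin_iovec_pages(iov, len);
 *	cookie = dma_memcpy_to_iovec(chan, iov, pinned, kdata, len);
 *	...
 *	dma_unpin_iovec_pages(pinned);
 */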

#endif /* DMAENGINE_H */