/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * Each client is responsible for keeping track of the channels it uses. See
 * the definition of dma_event_callback in dmaengine.h.
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_get is done for each channel device registered. When
 * the channel device is released, the corresponding kref_put is done in the
 * release method. Every time one of the device's channels is allocated to a
 * client, a kref_get occurs. When the channel is freed, the corresponding
 * kref_put happens. The device's release function does a completion, so
 * unregister_device does a remove event, device_unregister, a kref_put
 * for the first reference, then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
 * signals that it wants to use a channel, and dma_chan_put is called when
 * a channel is removed or a client using it is unregistered. A client can
 * take extra references per outstanding transaction, as is the case with
 * the NET DMA client. The release function does a kref_put on the device.
 * -ChrisL, DanW
 */
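
/*
 * Example: a minimal client, sketched for illustration only (the names
 * my_client and my_event_callback are hypothetical, not from an in-tree
 * user; returning DMA_ACK keeps every channel offered):
 *
 *	static enum dma_state_client
 *	my_event_callback(struct dma_client *client, struct dma_chan *chan,
 *			  enum dma_state state)
 *	{
 *		return DMA_ACK;
 *	}
 *
 *	static struct dma_client my_client = {
 *		.event_callback = my_event_callback,
 *	};
 *
 *	dma_cap_zero(my_client.cap_mask);
 *	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
 *	dma_async_client_register(&my_client);
 *	dma_async_client_chan_request(&my_client);
 */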

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);
        unsigned long count = 0;
        int i;

        for_each_possible_cpu(i)
                count += per_cpu_ptr(chan->local, i)->memcpy_count;

        return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
                                      char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);
        unsigned long count = 0;
        int i;

        for_each_possible_cpu(i)
                count += per_cpu_ptr(chan->local, i)->bytes_transferred;

        return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);
        int in_use = 0;

        if (unlikely(chan->slow_ref) &&
            atomic_read(&chan->refcount.refcount) > 1)
                in_use = 1;
        else {
                if (local_read(&(per_cpu_ptr(chan->local,
                                             get_cpu())->refcount)) > 0)
                        in_use = 1;
                put_cpu();
        }

        return sprintf(buf, "%d\n", in_use);
}

static struct device_attribute dma_attrs[] = {
        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
        __ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_dev_release(struct device *dev)
{
        struct dma_chan *chan = to_dma_chan(dev);
        kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
        .name		= "dma",
        .dev_attrs	= dma_attrs,
        .dev_release	= dma_dev_release,
};

/* --- client and device registration --- */

#define dma_chan_satisfies_mask(chan, mask) \
        __dma_chan_satisfies_mask((chan), &(mask))
static int
__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
                   DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

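/*
 * For instance, a client that wants a channel providing both memcpy and
 * xor capability would build its mask like this (a sketch; 'mask' and
 * 'chan' are illustrative locals):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	dma_cap_set(DMA_XOR, mask);
 *	if (dma_chan_satisfies_mask(chan, mask))
 *		(the channel advertises both capabilities)
 */
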
/**
 * dma_client_chan_alloc - try to allocate channels to a client
 * @client: &dma_client
 *
 * Called with dma_list_mutex held.
 */
static void dma_client_chan_alloc(struct dma_client *client)
{
        struct dma_device *device;
        struct dma_chan *chan;
        int desc;	/* allocated descriptor count */
        enum dma_state_client ack;

        /* Find a channel */
        list_for_each_entry(device, &dma_device_list, global_node) {
                /* Does the client require a specific DMA controller? */
                if (client->slave && client->slave->dma_dev
                                && client->slave->dma_dev != device->dev)
                        continue;

                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!dma_chan_satisfies_mask(chan, client->cap_mask))
                                continue;

                        desc = chan->device->device_alloc_chan_resources(
                                        chan, client);
                        if (desc >= 0) {
                                ack = client->event_callback(client,
                                                chan,
                                                DMA_RESOURCE_AVAILABLE);

                                /* we are done once this client rejects
                                 * an available resource
                                 */
                                if (ack == DMA_ACK) {
                                        dma_chan_get(chan);
                                        chan->client_count++;
                                } else if (ack == DMA_NAK)
                                        return;
                        }
                }
        }
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
                        return DMA_ERROR;
                }
        } while (status == DMA_IN_PROGRESS);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);

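/*
 * Example (a sketch, with 'cookie' obtained from a prior submission such
 * as the dma_async_memcpy_* helpers further down in this file):
 *
 *	status = dma_sync_wait(chan, cookie);
 *	if (status != DMA_SUCCESS)
 *		printk(KERN_ERR "transfer failed or timed out\n");
 */
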
/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
        struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
        chan->device->device_free_chan_resources(chan);
        kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

static void dma_chan_free_rcu(struct rcu_head *rcu)
{
        struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
        int bias = 0x7FFFFFFF;
        int i;

        for_each_possible_cpu(i)
                bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
        atomic_sub(bias, &chan->refcount.refcount);
        kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
        atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
        chan->slow_ref = 1;
        call_rcu(&chan->rcu, dma_chan_free_rcu);
}

/**
 * dma_clients_notify_available - broadcast available channels to the clients
 */
static void dma_clients_notify_available(void)
{
        struct dma_client *client;

        mutex_lock(&dma_list_mutex);

        list_for_each_entry(client, &dma_client_list, global_node)
                dma_client_chan_alloc(client);

        mutex_unlock(&dma_list_mutex);
}

/**
 * dma_clients_notify_removed - tell the clients that a channel is going away
 * @chan: channel on its way out
 */
static void dma_clients_notify_removed(struct dma_chan *chan)
{
        struct dma_client *client;
        enum dma_state_client ack;

        mutex_lock(&dma_list_mutex);

        list_for_each_entry(client, &dma_client_list, global_node) {
                ack = client->event_callback(client, chan,
                                DMA_RESOURCE_REMOVED);

                /* the client was holding resources for this channel,
                 * so free them
                 */
                if (ack == DMA_ACK) {
                        dma_chan_put(chan);
                        chan->client_count--;
                }
        }

        mutex_unlock(&dma_list_mutex);
}

292/**
Dan Williamsd379b012007-07-09 11:56:42 -0700293 * dma_async_client_register - register a &dma_client
294 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
Chris Leechc13c8262006-05-23 17:18:44 -0700295 */
Dan Williamsd379b012007-07-09 11:56:42 -0700296void dma_async_client_register(struct dma_client *client)
Chris Leechc13c8262006-05-23 17:18:44 -0700297{
Haavard Skinnemoendc0ee6432008-07-08 11:59:35 -0700298 /* validate client data */
299 BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
300 !client->slave);
301
Chris Leechc13c8262006-05-23 17:18:44 -0700302 mutex_lock(&dma_list_mutex);
303 list_add_tail(&client->global_node, &dma_client_list);
304 mutex_unlock(&dma_list_mutex);
Chris Leechc13c8262006-05-23 17:18:44 -0700305}
David Brownell765e3d82007-03-16 13:38:05 -0800306EXPORT_SYMBOL(dma_async_client_register);
Chris Leechc13c8262006-05-23 17:18:44 -0700307
/**
 * dma_async_client_unregister - unregister a client and free the &dma_client
 * @client: &dma_client to free
 *
 * Force frees any allocated DMA channels, frees the &dma_client memory
 */
void dma_async_client_unregister(struct dma_client *client)
{
        struct dma_device *device;
        struct dma_chan *chan;
        enum dma_state_client ack;

        if (!client)
                return;

        mutex_lock(&dma_list_mutex);
        /* free all channels the client is holding */
        list_for_each_entry(device, &dma_device_list, global_node)
                list_for_each_entry(chan, &device->channels, device_node) {
                        ack = client->event_callback(client, chan,
                                DMA_RESOURCE_REMOVED);

                        if (ack == DMA_ACK) {
                                dma_chan_put(chan);
                                chan->client_count--;
                        }
                }

        list_del(&client->global_node);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_unregister);

/**
 * dma_async_client_chan_request - offer the client every available channel
 *	that satisfies its capability mask
 * @client: requester
 */
void dma_async_client_chan_request(struct dma_client *client)
{
        mutex_lock(&dma_list_mutex);
        dma_client_chan_alloc(client);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_chan_request);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
        static int id;
        int chancnt = 0, rc;
        struct dma_chan *chan;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
               !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
               !device->device_prep_dma_xor);
        BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
               !device->device_prep_dma_zero_sum);
        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
               !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
               !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
               !device->device_prep_slave_sg);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
               !device->device_terminate_all);

        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
        BUG_ON(!device->device_is_tx_complete);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);

        init_completion(&device->done);
        kref_init(&device->refcount);

        mutex_lock(&dma_list_mutex);
        device->dev_id = id++;
        mutex_unlock(&dma_list_mutex);

        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        continue;

                chan->chan_id = chancnt++;
                chan->dev.class = &dma_devclass;
                chan->dev.parent = device->dev;
                dev_set_name(&chan->dev, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev);
                if (rc) {
                        chancnt--;
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }

                /* One for the channel, one for the class device */
                kref_get(&device->refcount);
                kref_get(&device->refcount);
                kref_init(&chan->refcount);
                chan->client_count = 0;
                chan->slow_ref = 0;
                INIT_RCU_HEAD(&chan->rcu);
        }

        mutex_lock(&dma_list_mutex);
        list_add_tail(&device->global_node, &dma_device_list);
        mutex_unlock(&dma_list_mutex);

        dma_clients_notify_available();

        return 0;

err_out:
        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                kref_put(&device->refcount, dma_async_device_cleanup);
                device_unregister(&chan->dev);
                chancnt--;
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

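/*
 * A driver-side sketch of registration (illustrative only; the 'foo'
 * names are hypothetical, and real drivers fill in more fields and
 * typically register several channels):
 *
 *	dma_cap_set(DMA_MEMCPY, foo->dma.cap_mask);
 *	foo->dma.device_alloc_chan_resources = foo_alloc_chan_resources;
 *	foo->dma.device_free_chan_resources = foo_free_chan_resources;
 *	foo->dma.device_prep_dma_memcpy = foo_prep_memcpy;
 *	foo->dma.device_is_tx_complete = foo_is_tx_complete;
 *	foo->dma.device_issue_pending = foo_issue_pending;
 *	foo->dma.dev = &pdev->dev;
 *	INIT_LIST_HEAD(&foo->dma.channels);
 *	list_add_tail(&foo->chan.device_node, &foo->dma.channels);
 *	err = dma_async_device_register(&foo->dma);
 */
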
/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
        struct dma_device *device;

        device = container_of(kref, struct dma_device, refcount);
        complete(&device->done);
}

/**
 * dma_async_device_unregister - unregisters DMA devices
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del(&device->global_node);
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                dma_clients_notify_removed(chan);
                device_unregister(&chan->dev);
                dma_chan_release(chan);
        }

        kref_put(&device->refcount, dma_async_device_cleanup);
        wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                            void *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);

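/*
 * Sketch of a caller (illustrative): if no descriptor can be obtained the
 * helper returns a negative errno and nothing is in flight, so a CPU copy
 * is a safe fallback; on success the copy is asynchronous and the caller
 * must wait on the cookie before touching @dest:
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (cookie < 0)
 *		memcpy(dest, src, len);
 *	else
 *		status = dma_sync_wait(chan, cookie);
 */
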
/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
                           unsigned int offset, void *kdata, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        unsigned int dest_off, struct page *src_pg, unsigned int src_off,
        size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
        struct dma_chan *chan)
{
        tx->chan = chan;
        spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

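/*
 * Drivers call this when setting up a freshly allocated descriptor and
 * then install their own tx_submit hook; a sketch (the foo_* names and
 * the 'txd' member are hypothetical):
 *
 *	struct foo_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 *
 *	dma_async_tx_descriptor_init(&desc->txd, chan);
 *	desc->txd.tx_submit = foo_tx_submit;
 */
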
/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 *
 * This routine assumes that tx was obtained from a call to async_memcpy,
 * async_xor, async_memset, etc. which ensures that tx is "in-flight" (prepped
 * and submitted). Walking the parent chain is only meant to cover for DMA
 * drivers that do not implement the DMA_INTERRUPT capability and may race with
 * the driver's descriptor cleanup routine.
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        enum dma_status status;
        struct dma_async_tx_descriptor *iter;
        struct dma_async_tx_descriptor *parent;

        if (!tx)
                return DMA_SUCCESS;

        WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
                  " %s\n", __func__, dev_name(&tx->chan->dev));

        /* poll through the dependency chain, return when tx is complete */
        do {
                iter = tx;

                /* find the root of the unsubmitted dependency chain */
                do {
                        parent = iter->parent;
                        if (!parent)
                                break;
                        else
                                iter = parent;
                } while (parent);

                /* there is a small window for ->parent == NULL and
                 * ->cookie == -EBUSY
                 */
                while (iter->cookie == -EBUSY)
                        cpu_relax();

                status = dma_sync_wait(iter->chan, iter->cookie);
        } while (status == DMA_IN_PROGRESS || (iter != tx));

        return status;
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

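/*
 * Example from an async_tx-style caller, sketched under the assumption
 * that the async_memcpy signature matches its in-tree definition of this
 * era (page pairs, offsets, length, flags, dependency, callback, param):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = async_memcpy(dest_page, src_page, 0, 0, len,
 *			  ASYNC_TX_ACK, NULL, NULL, NULL);
 *	if (dma_wait_for_async_tx(tx) != DMA_SUCCESS)
 *		(handle the failed or timed-out copy)
 */
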
/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = tx->next;
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        chan = dep->chan;

        /* keep submitting up until a channel switch is detected; in that
         * case we will be called again as a result of processing the
         * interrupt from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
                spin_lock_bh(&dep->lock);
                dep->parent = NULL;
                dep_next = dep->next;
                if (dep_next && dep_next->chan == chan)
                        dep->next = NULL; /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                spin_unlock_bh(&dep->lock);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

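/*
 * A driver would typically call this from its descriptor cleanup path,
 * after marking the cookie complete and running the transaction's own
 * callback; a sketch (the foo_* names are hypothetical):
 *
 *	static void foo_cleanup_descriptor(struct foo_desc *desc)
 *	{
 *		(mark desc->txd.cookie complete, run desc->txd.callback)
 *		dma_run_dependencies(&desc->txd);
 *	}
 */
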
static int __init dma_bus_init(void)
{
        mutex_init(&dma_list_mutex);
        return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);