/*
 * arch/arm/common/dmabounce.c
 *
 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
 * limited DMA windows. These functions utilize bounce buffers to
 * copy data to/from buffers located outside the DMA region. This
 * only works for systems in which DMA memory is at the bottom of
 * RAM, the remainder of memory is at the top and the DMA memory
 * can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 * DMA windows will require custom implementations that reserve memory
 * areas at early bootup.
 *
 * Original version by Brad Parker (brad@heeltoe.com)
 * Re-written by Christopher Hoover <ch@murgatroid.com>
 * Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright (C) 2002 Hewlett Packard Company.
 * Copyright (C) 2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
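
/*
 * A minimal usage sketch (illustrative only; "my_dev" and the pool
 * sizes are made up, not part of this file): platform code registers
 * each DMA-limited device once at boot,
 *
 *	if (dmabounce_register_dev(&my_dev->dev, 256, 4096))
 *		printk(KERN_ERR "dmabounce setup failed\n");
 *
 * after which the dma_map_*()/dma_unmap_*() implementations below
 * transparently substitute a bounce buffer whenever a mapping falls
 * outside the device's DMA mask.
 */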

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>

#include <asm/cacheflush.h>

#undef DEBUG
#undef STATS

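/*
 * DO_STATS() compiles away entirely unless STATS is defined above; the
 * do { } while (0) wrapper keeps it safe to use as a single statement,
 * e.g. as the body of an un-braced if.
 */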
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct list_head node;

	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

	rwlock_t lock;
};

static LIST_HEAD(dmabounce_devs);

#ifdef STATS
static void print_alloc_stats(struct dmabounce_device_info *device_info)
{
	printk(KERN_INFO
		"%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
		device_info->dev->bus_id,
		device_info->small.allocs, device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs);
}
#endif

/* find the given device in the dmabounce device list */
static inline struct dmabounce_device_info *
find_dmabounce_dev(struct device *dev)
{
	struct dmabounce_device_info *d;

	list_for_each_entry(d, &dmabounce_devs, node)
		if (d->dev == dev)
			return d;

	return NULL;
}


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
	if (device_info->total_allocs % 1000 == 0)
		print_alloc_stats(device_info);
#endif

	write_lock_irqsave(&device_info->lock, flags);

	list_add(&buf->node, &device_info->safe_buffers);

	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				    buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

#ifdef STATS
static void print_map_stats(struct dmabounce_device_info *device_info)
{
	dev_info(device_info->dev,
		"dmabounce: map_op_count=%lu, bounce_count=%lu\n",
		device_info->map_op_count, device_info->bounce_count);
}
#endif

static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

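		/*
		 * For an ordinary contiguous mask (2^n - 1) this evaluates
		 * to 2^n, the largest single mapping that fits below the
		 * mask; with a full 32-bit mask it wraps to 0, which the
		 * check below treats as "no limit".
		 */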
		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
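		 *
		 * ORing the addresses of the first and last bytes and
		 * masking with ~mask is nonzero exactly when part of the
		 * buffer lies above the device's DMA mask (assuming the
		 * usual contiguous mask).
		 */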
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == NULL) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	}

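	/*
	 * Do the cache maintenance on the buffer the device will actually
	 * see (the original, or the bounce copy if one was substituted).
	 */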
	consistent_sync(ptr, size, dir);

	return dma_addr;
}

static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	struct safe_buffer *buf = NULL;

	/*
	 * Trying to unmap an invalid mapping
	 */
	if (dma_mapping_error(dma_addr)) {
		dev_err(dev, "Trying to unmap invalid mapping\n");
		return;
	}

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		BUG_ON(buf->size != size);

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			unsigned long ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM. Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			ptr = (unsigned long)buf->ptr;
			dmac_clean_range(ptr, ptr + size);
		}
		free_safe_buffer(device_info, buf);
	}
}

static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	struct safe_buffer *buf = NULL;

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		/*
		 * Both of these checks from original code need to be
		 * commented out b/c some drivers rely on the following:
		 *
		 * 1) Drivers may map a large chunk of memory into DMA space
		 *    but only sync a small portion of it. Good example is
		 *    allocating a large buffer, mapping it, and then
		 *    breaking it up into small descriptors. No point
		 *    in syncing the whole buffer if you only have to
		 *    touch one descriptor.
		 *
		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
		 *    usually only synced in one dir at a time.
		 *
		 * See drivers/net/eepro100.c for examples of both cases.
		 *
		 * -ds
		 *
		 * BUG_ON(buf->size != size);
		 * BUG_ON(buf->direction != dir);
		 */

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		switch (dir) {
		case DMA_FROM_DEVICE:
			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);
			break;
		case DMA_TO_DEVICE:
			dev_dbg(dev,
				"%s: copy out unsafe %p to safe %p, size %d\n",
				__func__, buf->ptr, buf->safe, size);
			memcpy(buf->safe, buf->ptr, size);
			break;
		case DMA_BIDIRECTIONAL:
			BUG();	/* is this allowed? what does it mean? */
		default:
			BUG();
		}
		consistent_sync(buf->safe, size, dir);
	} else {
		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
	}
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range. if it is,
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(dir == DMA_NONE);

	dma_addr = map_single(dev, ptr, size, dir);

	return dma_addr;
}

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer. (basically return things back to the way they
 * should be)
 */

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
			enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	BUG_ON(dir == DMA_NONE);

	unmap_single(dev, dma_addr, size, dir);
}

int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

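	/*
	 * Note: page_address() below is only valid for lowmem pages, so
	 * this implementation assumes no highmem scatterlist entries.
	 */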
	for (i = 0; i < nents; i++, sg++) {
		struct page *page = sg->page;
		unsigned int offset = sg->offset;
		unsigned int length = sg->length;
		void *ptr = page_address(page) + offset;

		sg->dma_address =
			map_single(dev, ptr, length, dir);
	}

	return nents;
}

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		unmap_single(dev, dma_addr, length, dir);
	}
}

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
				enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	sync_single(dev, dma_addr, size, dir);
}

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
				enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	sync_single(dev, dma_addr, size, dir);
}

void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir)
{
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}
}

void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir)
{
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}
}

static int
dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
		    unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kzalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		printk(KERN_ERR
			"Could not allocate dmabounce_device_info for %s\n",
			dev->bus_id);
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
#endif

	list_add(&device_info->node, &dmabounce_devs);

	printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
		dev->bus_id, dev->bus->name);

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
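
/*
 * Example (a sketch only; "my_dev", 256 and 4096 are illustrative
 * values, not taken from this file):
 *
 *	ret = dmabounce_register_dev(&my_dev->dev, 256, 4096);
 *
 * creates one pool of 256-byte bounce buffers for small mappings and
 * one of 4096-byte buffers for larger ones; anything bigger falls back
 * to dma_alloc_coherent() in alloc_safe_buffer() above.
 */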

void
dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);

	if (!device_info) {
		printk(KERN_WARNING
			"%s: Never registered with dmabounce but attempting "
			"to unregister!\n", dev->bus_id);
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		printk(KERN_ERR
			"%s: Removing from dmabounce with pending buffers!\n",
			dev->bus_id);
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	print_alloc_stats(device_info);
	print_map_stats(device_info);
#endif

	list_del(&device_info->node);

	kfree(device_info);

	printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
		dev->bus_id, dev->bus->name);
}


EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
EXPORT_SYMBOL(dma_sync_single);
EXPORT_SYMBOL(dma_sync_sg);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");