/*
 * Dynamic DMA mapping support.
 *
 * We never have any address translations to worry about, so this
 * is just alloc/free.
 */

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/cacheflush.h>

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* devices that cannot address all of memory must allocate from ZONE_DMA */
	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
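
/*
 * Usage sketch (illustrative, not part of this file): a driver would
 * typically pair these calls across its lifetime, allocating a coherent
 * buffer once and freeing it on teardown. "mydev", "buf" and "buf_dma"
 * are hypothetical names, assumed for illustration only.
 *
 *	void *buf;
 *	dma_addr_t buf_dma;
 *
 *	buf = dma_alloc_coherent(mydev, PAGE_SIZE, &buf_dma, GFP_KERNEL);
 *	if (buf == NULL)
 *		return -ENOMEM;
 *	... hand buf_dma to the device, access buf from the CPU ...
 *	dma_free_coherent(mydev, PAGE_SIZE, buf, buf_dma);
 */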

void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
				size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		/* CPU wrote the buffer: write the cache back to memory */
		flush_dcache_range(handle, size);
		break;
	case DMA_FROM_DEVICE:
		/* Should be clear already */
		break;
	default:
		if (printk_ratelimit())
			printk(KERN_ERR "dma_sync_single_for_device: unsupported dir %u\n",
			       dir);
		break;
	}
}
EXPORT_SYMBOL(dma_sync_single_for_device);
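
/*
 * Usage sketch (illustrative, not part of this file): after the CPU
 * updates a buffer that the device will read, the dirty cache lines
 * must be written back first. "desc", "desc_dma" and the field and
 * value names are hypothetical.
 *
 *	desc->cmd = DESC_CMD_START;
 *	dma_sync_single_for_device(dev, desc_dma, sizeof(*desc),
 *				   DMA_TO_DEVICE);
 */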

dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
			  enum dma_data_direction dir)
{
	dma_addr_t handle = virt_to_phys(addr);

	/* write back the cache so the device sees the CPU's latest data */
	flush_dcache_range(handle, size);
	return handle;
}
EXPORT_SYMBOL(dma_map_single);
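
/*
 * Usage sketch (illustrative): mapping a kernel buffer for a transmit
 * DMA. On this platform the returned handle is simply the physical
 * address of the buffer. "buf" and "len" are hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... program "handle" into the device's DMA address register ...
 */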

dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
EXPORT_SYMBOL(dma_map_page);
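
/*
 * Usage sketch (illustrative): mapping a sub-range of a page, as a
 * network driver might do for a paged fragment. "page", "offset" and
 * "len" are hypothetical; the handle works out to the physical address
 * of the fragment.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
 */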