/*
 * Dynamic DMA mapping support
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/bfin-global.h>

static spinlock_t dma_page_lock;
static unsigned long *dma_page;
static unsigned int dma_pages;
static unsigned long dma_base;
static unsigned long dma_size;
static unsigned int dma_initialized;

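/*
 * Set up the page-aligned region [start, end) as the pool backing
 * coherent allocations, and grab one page to serve as the allocation
 * bitmap (one bit per pool page).
 */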
void dma_alloc_init(unsigned long start, unsigned long end)
{
	spin_lock_init(&dma_page_lock);
	dma_initialized = 0;

	dma_page = (unsigned long *)__get_free_page(GFP_KERNEL);
	memset(dma_page, 0, PAGE_SIZE);
	dma_base = PAGE_ALIGN(start);
	dma_size = PAGE_ALIGN(end) - PAGE_ALIGN(start);
	dma_pages = dma_size >> PAGE_SHIFT;
	memset((void *)dma_base, 0, DMA_UNCACHED_REGION);
	dma_initialized = 1;

	printk(KERN_INFO "%s: dma_page @ 0x%p - %d pages at 0x%08lx\n", __func__,
	       dma_page, dma_pages, dma_base);
}

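/* Number of whole pages needed to cover a size-byte allocation. */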
static inline unsigned int get_pages(size_t size)
{
	return ((size - 1) >> PAGE_SHIFT) + 1;
}

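/*
 * First-fit scan of the bitmap for a run of 'pages' consecutive free
 * pages.  Once a run is found, the inner while loop walks i back over
 * the run marking each page used, leaving i at the run's first page.
 * Lazily initializes the pool from the top of RAM on first use.
 * Returns the address of the run, or 0 if no run is free.
 */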
static unsigned long __alloc_dma_pages(unsigned int pages)
{
	unsigned long ret = 0, flags;
	int i, count = 0;

	if (dma_initialized == 0)
		dma_alloc_init(_ramend - DMA_UNCACHED_REGION, _ramend);

	spin_lock_irqsave(&dma_page_lock, flags);

	for (i = 0; i < dma_pages;) {
		if (test_bit(i++, dma_page) == 0) {
			if (++count == pages) {
				while (count--)
					__set_bit(--i, dma_page);

				ret = dma_base + (i << PAGE_SHIFT);
				break;
			}
		} else
			count = 0;
	}
	spin_unlock_irqrestore(&dma_page_lock, flags);
	return ret;
}

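/*
 * Return a run of pages to the pool by clearing their bitmap bits.
 * BUG()s if the range extends past the end of the pool.
 */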
static void __free_dma_pages(unsigned long addr, unsigned int pages)
{
	unsigned long page = (addr - dma_base) >> PAGE_SHIFT;
	unsigned long flags;
	int i;

	if ((page + pages) > dma_pages) {
		printk(KERN_ERR "%s: freeing outside range.\n", __func__);
		BUG();
	}

	spin_lock_irqsave(&dma_page_lock, flags);
	for (i = page; i < page + pages; i++)
		__clear_bit(i, dma_page);

	spin_unlock_irqrestore(&dma_page_lock, flags);
}

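/*
 * Allocate a zeroed buffer from the uncached pool for coherent DMA.
 * Blackfin has no MMU, so the returned virtual address is also the bus
 * address.  Typical driver usage (sketch):
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, size, buf, handle);
 */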
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	ret = (void *)__alloc_dma_pages(get_pages(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
	__free_dma_pages((unsigned long)vaddr, get_pages(size));
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Streaming DMA helpers for existing drivers.  With no MMU and no
 * hardware cache coherency, "mapping" a buffer reduces to invalidating
 * its lines in the data cache, and the bus address is simply the
 * buffer's address; the unmap functions are therefore no-ops.
 */

dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	invalidate_dcache_range((unsigned long)ptr,
				(unsigned long)ptr + size);

	return (dma_addr_t) ptr;
}
EXPORT_SYMBOL(dma_map_single);

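/*
 * Map each scatterlist entry: record its (virtual == bus) address and
 * invalidate the corresponding dcache lines, entry by entry.
 */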
int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		sg->dma_address = (dma_addr_t) sg_virt(sg);

		invalidate_dcache_range(sg_dma_address(sg),
					sg_dma_address(sg) +
					sg_dma_len(sg));
	}

	return nents;
}
EXPORT_SYMBOL(dma_map_sg);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
EXPORT_SYMBOL(dma_unmap_single);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		  int nhwentries, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
EXPORT_SYMBOL(dma_unmap_sg);