/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/io.h>

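/*
 * Per-device coherent memory pool, installed by
 * dma_declare_coherent_memory(). The bitmap tracks, page by page,
 * which parts of the pool are currently handed out.
 */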
struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

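/*
 * Allocate a coherent buffer: first try the device's declared memory
 * pool (if any); otherwise take pages from the page allocator, write
 * back and invalidate their cache lines, and hand back an uncached
 * ioremap_nocache() mapping. *dma_handle gets the physical address of
 * the underlying pages.
 */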
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	*dma_handle = virt_to_phys(ret);
	return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);

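/*
 * Free a buffer from dma_alloc_coherent(): release the bitmap region
 * if the buffer came from the device's declared pool, otherwise unmap
 * the uncached alias and return the pages to the page allocator.
 */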
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem && vaddr >= mem->virt_base &&
	    vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
	} else {
		WARN_ON(irqs_disabled());	/* for portability */
		BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
		free_pages((unsigned long)phys_to_virt(dma_handle), order);
		iounmap(vaddr);
	}
}
EXPORT_SYMBOL(dma_free_coherent);

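/*
 * Install a device-private coherent memory pool: map the given bus
 * address range uncached and set up the allocation bitmap that
 * dma_alloc_coherent()/dma_free_coherent() consult above. Returns
 * DMA_MEMORY_MAP or DMA_MEMORY_IO on success, 0 on failure.
 */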
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap_nocache(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
	dev->dma_mem = NULL;
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

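/*
 * Tear down a pool set up by dma_declare_coherent_memory(): unmap the
 * uncached region and free the bookkeeping. Any outstanding
 * allocations from the pool become invalid.
 */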
void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

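/*
 * Reserve a specific range of a declared pool (e.g. a region a boot
 * loader already wrote to) so dma_alloc_coherent() cannot hand it out.
 * Returns the kernel virtual address of the reserved range, or an
 * ERR_PTR() on failure.
 */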
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int pos, err;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos,
				     get_order(pages << PAGE_SHIFT));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

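/*
 * Flush the CPU cache for a buffer about to be used for DMA. On
 * classic 29-bit SH the flush primitives must be given the cached P1
 * alias of the address, hence the P1SEGADDR() conversion; SH-5 has no
 * such segment and uses the address as-is.
 */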
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
#ifdef CONFIG_CPU_SH5
	void *p1addr = vaddr;
#else
	void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);

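/*
 * Helper for board setup code: allocate a coherent buffer of memsize
 * bytes and publish it as the platform device's last resource, which
 * the caller must have left empty (flags == 0).
 */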
int platform_resource_setup_memory(struct platform_device *pdev,
				   char *name, unsigned long memsize)
{
	struct resource *r;
	dma_addr_t dma_handle;
	void *buf;

	r = pdev->resource + pdev->num_resources - 1;
	if (r->flags) {
		pr_warning("%s: unable to find empty space for resource\n",
			   name);
		return -EINVAL;
	}

	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
	if (!buf) {
		pr_warning("%s: unable to allocate memory\n", name);
		return -ENOMEM;
	}

	memset(buf, 0, memsize);

	r->flags = IORESOURCE_MEM;
	r->start = dma_handle;
	r->end = r->start + memsize - 1;
	r->name = name;
	return 0;
}
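
/*
 * Usage sketch (hypothetical, not part of this file): board setup code
 * that has left one zeroed resource slot at the end of a platform
 * device's resource array can reserve 1 MiB of coherent memory for it
 * with:
 *
 *	platform_resource_setup_memory(&foo_device, "foo_fb", 1 << 20);
 *
 * On success the final resource becomes an IORESOURCE_MEM entry
 * covering the allocated buffer, with its start set to the DMA handle.
 * Here "foo_device" and "foo_fb" are illustrative names only.
 */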