/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
#include <asm/machdep.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
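
/*
 * Illustrative sketch, not part of the original file: on a platform whose
 * host bridge makes system RAM visible to devices at a fixed bus offset,
 * platform setup code would record that offset per device, e.g. (assuming
 * the set_dma_offset() helper from <asm/dma-mapping.h>):
 *
 *	set_dma_offset(&pdev->dev, PCI_DRAM_OFFSET);
 *
 * Every bus address produced below is then the CPU physical address plus
 * get_dma_offset(dev).
 */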

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_offset(dev);

	return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
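
/*
 * Illustrative usage sketch, not from this file: drivers do not call the
 * dma_direct_* entry points directly; they go through the generic wrappers,
 * which dispatch via the struct dma_map_ops registered below, e.g.:
 *
 *	dma_addr_t bus_addr;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus_addr,
 *				 GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, bus_addr);
 */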

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}
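
/*
 * Illustrative sketch, not from this file: a driver reaches
 * dma_direct_map_sg() through the generic dma_map_sg() wrapper, e.g.:
 *
 *	int count = dma_map_sg(&pdev->dev, sglist, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -EIO;
 *
 * In the direct case each entry's dma_address is simply its physical
 * address plus the per-device offset, and __dma_sync_page() performs the
 * cache maintenance needed on non-coherent platforms.
 */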

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* A direct mapping holds no resources, so there is nothing to undo */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
	return 1;
#endif
}
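
/*
 * Worked example, under an illustrative assumption not taken from this
 * file: with 4GB of RAM and a zero DMA offset, memblock_end_of_DRAM()
 * returns 0x1_0000_0000, so the highest bus address a device must reach
 * is 0xffff_ffff. A device advertising DMA_BIT_MASK(32) therefore passes
 * the check above, while a 31-bit-capable device would be rejected.
 */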

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
}
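
/*
 * Illustrative sketch, not from this file: the streaming API wrappers end
 * up in the map/unmap_page hooks above for directly mapped buses, e.g.:
 *
 *	dma_addr_t bus_addr = dma_map_single(&pdev->dev, skb->data,
 *					     len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, bus_addr))
 *		return -EIO;
 *	...
 *	dma_unmap_single(&pdev->dev, bus_addr, len, DMA_TO_DEVICE);
 *
 * No IOMMU state is created, so unmap is a no-op; on non-coherent
 * platforms the map side does the cache maintenance up front.
 */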

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

struct dma_map_ops dma_direct_ops = {
	.alloc_coherent		= dma_direct_alloc_coherent,
	.free_coherent		= dma_direct_free_coherent,
	.map_sg			= dma_direct_map_sg,
	.unmap_sg		= dma_direct_unmap_sg,
	.dma_supported		= dma_direct_dma_supported,
	.map_page		= dma_direct_map_page,
	.unmap_page		= dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu	= dma_direct_sync_single,
	.sync_single_for_device	= dma_direct_sync_single,
	.sync_sg_for_cpu	= dma_direct_sync_sg,
	.sync_sg_for_device	= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);
	if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
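
/*
 * Illustrative sketch, not from this file: a driver negotiates its mask
 * at probe time, falling back to 32-bit if a wider mask is rejected:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */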

u64 dma_get_required_mask(struct device *dev)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);
	u64 mask, end = 0;

	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);

	if (unlikely(dma_ops == NULL))
		return 0;

#ifdef CONFIG_PPC64
	else if (dma_ops == &dma_iommu_ops)
		return dma_iommu_get_required_mask(dev);
#endif
#ifdef CONFIG_SWIOTLB
	else if (dma_ops == &swiotlb_dma_ops) {
		u64 max_direct_dma_addr = dev->archdata.max_direct_dma_addr;

		end = memblock_end_of_DRAM();
		if (max_direct_dma_addr && end > max_direct_dma_addr)
			end = max_direct_dma_addr;
		end += get_dma_offset(dev);
	}
#endif
	else if (dma_ops == &dma_direct_ops)
		end = memblock_end_of_DRAM() + get_dma_offset(dev);
	else {
		WARN_ONCE(1, "%s: unknown ops %p\n", __func__, dma_ops);
		end = memblock_end_of_DRAM();
	}

	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
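
/*
 * Worked example, under an illustrative assumption not taken from this
 * file: with end = 0x1_0000_0000 (4GB of RAM, zero offset), fls64(end)
 * is 33, so mask starts as 1ULL << 32, and the "mask += mask - 1" step
 * widens it to 0x1_ffff_ffff, i.e. an all-ones mask wide enough to cover
 * the top of DRAM.
 */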

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t handle, size_t size)
{
	unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
	pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
	return remap_pfn_range(vma, vma->vm_start,
			       pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_coherent);
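
/*
 * Illustrative sketch, not from this file: a character-device driver
 * could export a coherent buffer to userspace from its ->mmap file
 * operation (foo_dev and its buf/bus_addr/size fields are hypothetical):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		return dma_mmap_coherent(fd->dev, vma, fd->buf,
 *					 fd->bus_addr, fd->size);
 *	}
 */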