/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped buses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
#include <asm/machdep.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set the offset in the device's archdata (see get_dma_offset()); by
 * default the offset is PCI_DRAM_OFFSET.
 */
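
/*
 * Illustrative sketch only: a platform whose devices see system RAM
 * starting at bus address 0x80000000 could install the per-device offset
 * from its device-setup hook, assuming the set_dma_offset() helper from
 * <asm/dma-mapping.h>:
 *
 *	set_dma_offset(&pdev->dev, 0x80000000ull);
 *
 * dma_direct_map_page() below would then return the physical address plus
 * 0x80000000 as the device-visible DMA address.
 */
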
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag,
				struct dma_attrs *attrs)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_offset(dev);

	return ret;
#endif
}
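
/*
 * Usage sketch (hypothetical driver code, not taken from this file): a
 * driver never calls the above directly; it goes through the generic API,
 * which dispatches here for devices using dma_direct_ops:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &handle,
 *				       GFP_KERNEL);
 *	if (buf)
 *		dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, handle);
 *
 * "handle" is what the device is programmed with; "buf" is the CPU view.
 */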

void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle,
			      struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     struct dma_attrs *attrs)
{
	unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
	pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
	return remap_pfn_range(vma, vma->vm_start,
			       pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
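
/*
 * Usage sketch (hypothetical): a driver's mmap file operation can export a
 * coherent buffer to user space through the generic wrapper, which ends up
 * in the .mmap callback above for dma_direct_ops devices:
 *
 *	return dma_mmap_coherent(dev, vma, buf, handle, size);
 */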

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}
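
/*
 * Usage sketch (hypothetical): with a populated scatterlist, a driver maps
 * it through the generic API and must check the returned count:
 *
 *	int count = dma_map_sg(&pdev->dev, sgl, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -EIO;
 *
 * It then programs the device using sg_dma_address()/sg_dma_len() on each
 * entry, and later calls dma_unmap_sg() with the original nents.
 */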

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* Nothing to do: a direct mapping holds no per-mapping state. */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
	return 1;
#endif
}
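
/*
 * Worked example (assumed numbers): with a zero offset and 4 GiB of RAM,
 * memblock_end_of_DRAM() is 0x100000000, so the highest RAM address is
 * 0xffffffff. A driver asking for DMA_BIT_MASK(32) == 0xffffffff is
 * accepted, while DMA_BIT_MASK(31) would be refused.
 */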

static u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 end, mask;

	end = memblock_end_of_DRAM() + get_dma_offset(dev);

	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}
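
/*
 * Worked example (assumed numbers): for end = 0x120000000 (4.5 GiB),
 * fls64(end) is 33, so mask starts as 1ULL << 32 = 0x100000000 and the
 * "mask += mask - 1" step widens it to 0x1ffffffff; the required mask is
 * thus rounded up to a full power-of-two range covering all of DRAM.
 */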

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/* Nothing to do: a direct mapping holds no per-mapping state. */
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

struct dma_map_ops dma_direct_ops = {
	.alloc				= dma_direct_alloc_coherent,
	.free				= dma_direct_free_coherent,
	.mmap				= dma_direct_mmap_coherent,
	.map_sg				= dma_direct_map_sg,
	.unmap_sg			= dma_direct_unmap_sg,
	.dma_supported			= dma_direct_dma_supported,
	.map_page			= dma_direct_map_page,
	.unmap_page			= dma_direct_unmap_page,
	.get_required_mask		= dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu		= dma_direct_sync_single,
	.sync_single_for_device		= dma_direct_sync_single,
	.sync_sg_for_cpu		= dma_direct_sync_sg,
	.sync_sg_for_device		= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);
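
/*
 * Hookup sketch (illustrative, assuming the set_dma_ops() helper from
 * <asm/dma-mapping.h>): platform bus-setup code selects these ops for a
 * device that can address memory directly:
 *
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 */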

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);
	if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
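
/*
 * Usage sketch (typical driver probe code): a driver declares how many
 * address bits its device can drive before doing any mapping, and treats a
 * non-zero return as "32-bit DMA unsupported":
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */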

u64 dma_get_required_mask(struct device *dev)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);

	if (unlikely(dma_ops == NULL))
		return 0;

	if (dma_ops->get_required_mask)
		return dma_ops->get_required_mask(dev);

	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
	dma_debug_add_bus(&vio_bus_type);
#endif

	return 0;
}
fs_initcall(dma_init);