#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#define DMA_ERROR_CODE	(~0)
extern struct dma_map_ops arm_dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}
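
/*
 * Illustrative sketch (not part of the original header): per-device
 * dispatch means bus or platform code can install an alternative
 * dma_map_ops before drivers start mapping buffers; anything without an
 * archdata.dma_ops override falls back to arm_dma_ops via get_dma_ops().
 * The ops table name below is hypothetical.
 *
 *	extern struct dma_map_ops my_iommu_dma_ops;	// assumed example
 *
 *	static void my_platform_setup_dma(struct device *dev)
 *	{
 *		set_dma_ops(dev, &my_iommu_dma_ops);
 *	}
 */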

#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->set_dma_mask(dev, mask);
}
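
/*
 * Illustrative sketch (not part of this header): a driver would normally
 * set its DMA mask at probe time and treat failure as fatal.  The pdev
 * variable is an assumed platform/PCI device pointer.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */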

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device. These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches. We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change. Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}

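/*
 * Illustrative sketch of the ownership model above (not part of this
 * header): drivers hand a streaming buffer back and forth with the sync
 * calls provided via asm-generic/dma-mapping-common.h, never by using the
 * __dma_* helpers directly.  Variable names are assumed.
 *
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... device writes into the buffer ...
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU may now read the buffer ...
 */
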
extern int dma_supported(struct device *, u64);
extern int dma_set_mask(struct device *, u64);
/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}
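
/*
 * Illustrative sketch (not part of this header): every streaming mapping
 * should be checked with dma_mapping_error() before the handle is handed
 * to hardware.  dma_map_single() here comes from the generic layer
 * included above; variable names are assumed.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */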

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * while this call is executing, and after it returns, are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

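/*
 * Illustrative sketch (not part of this header): a typical coherent
 * allocation for a descriptor ring and its matching release.  The names
 * and the RING_BYTES size are assumed.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device, access ring from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
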
/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);

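/*
 * Illustrative sketch (not part of this header): forwarding a driver's
 * mmap file operation to dma_mmap_coherent() for a buffer obtained from
 * dma_alloc_coherent().  The my_buf structure and its fields are assumed.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_buf *b = file->private_data;
 *
 *		return dma_mmap_coherent(b->dev, vma, b->cpu_addr,
 *					 b->handle, b->size);
 *	}
 */
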
/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);

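/*
 * Illustrative sketch (not part of this header): write-combining memory is
 * the usual choice for CPU-written, device-read buffers such as
 * framebuffers.  Names and sizes are assumed.
 *
 *	dma_addr_t fb_dma;
 *	void *fb_virt = dma_alloc_writecombine(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb_virt)
 *		return -ENOMEM;
 *	...
 *	dma_free_writecombine(dev, fb_size, fb_virt, fb_dma);
 */
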
/*
 * This can be called during boot to increase the size of the consistent
 * DMA region above its default value of 2MB. It must be called before the
 * memory allocator is initialised, i.e. before any core_initcall.
 */
extern void __init init_consistent_dma_size(unsigned long size);

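/*
 * Illustrative sketch (not part of this header): a platform needing a
 * larger consistent region would call this from early machine setup,
 * before any core_initcall runs.  The 16MB figure is an assumed example.
 *
 *	init_consistent_dma_size(SZ_16M);
 */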

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));

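/*
 * Illustrative sketch (not part of this header): platform code registering
 * a device whose DMA window only covers the first 64MB of bus space.  The
 * callback, the 64MB limit, and the pool sizes are assumed examples.
 *
 *	static int my_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > SZ_64M;	// bounce anything above 64MB
 *	}
 *
 *	ret = dmabounce_register_dev(dev, 2048, 4096, my_needs_bounce);
 */
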
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	return 1;
}


static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
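
/*
 * Illustrative sketch (not part of this header): drivers reach these
 * arm_dma_* implementations indirectly through the generic dma_map_sg()
 * wrappers rather than calling them by name.  Variable names are assumed.
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	... program the device using sg_dma_address()/sg_dma_len() ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */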

#endif /* __KERNEL__ */
#endif