#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
        return pfn_to_page(__bus_to_pfn(addr));
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return __arch_page_to_dma(dev, page);
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_page(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device. These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs this notion does not work due to
 * speculative prefetches. We model our approach on the assumption that
 * the CPU may perform speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change. Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
        enum dma_data_direction dir)
{
        extern void ___dma_single_cpu_to_dev(const void *, size_t,
                enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
        enum dma_data_direction dir)
{
        extern void ___dma_single_dev_to_cpu(const void *, size_t,
                enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
{
        extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
                size_t, enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
{
        extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
                size_t, enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_page_dev_to_cpu(page, off, size, dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
        if (mask < ISA_DMA_THRESHOLD)
                return 0;
        return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_DMABOUNCE
        if (dev->archdata.dmabounce) {
                if (dma_mask >= ISA_DMA_THRESHOLD)
                        return 0;
                else
                        return -EIO;
        }
#endif
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}
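
/*
 * Illustrative sketch (not part of this header): a typical probe path
 * limiting a device to 32-bit DMA. foo_probe and pdev are hypothetical
 * names.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 *		...
 *	}
 */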

static inline int dma_get_cache_alignment(void)
{
        return 32;
}

static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
        return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == ~0;
}
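
/*
 * Illustrative sketch: checking a streaming mapping for failure before
 * handing the address to the device ("buf" and "len" are hypothetical):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */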

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp)
{
        return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
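
/*
 * Illustrative sketch: allocating a descriptor ring at probe time and
 * freeing it at remove ("ring", "phys" and RING_SIZE are hypothetical):
 *
 *	dma_addr_t phys;
 *	void *ring = dma_alloc_coherent(dev, RING_SIZE, &phys, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program 'phys' into the device, access 'ring' from the CPU ...
 *	dma_free_coherent(dev, RING_SIZE, ring, phys);
 */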

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * become invalid once this call begins executing.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
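
/*
 * Illustrative sketch: exporting a coherent buffer from a driver's
 * file_operations .mmap handler (foo_mmap and the foo fields are
 * hypothetical):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->handle, foo->size);
 *	}
 */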

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
                gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
        dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
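
/*
 * Illustrative sketch: framebuffer drivers commonly pair these, e.g.
 *
 *	fb->screen_base = dma_alloc_writecombine(dev, fb_size,
 *						 &fb_phys, GFP_KERNEL);
 *
 * with dma_mmap_writecombine() called from the fbdev mmap hook. The
 * names here are hypothetical.
 */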

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
                unsigned long);
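
/*
 * Illustrative sketch: platform code registering a device that needs
 * bouncing, with a 512-byte small-buffer pool and a 4KiB large-buffer
 * pool (the sizes are illustrative, not a recommendation):
 *
 *	if (dmabounce_register_dev(dev, 512, 4096))
 *		dev_err(dev, "failed to register with dmabounce\n");
 */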

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
#ifdef CONFIG_SA1111
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
#else
static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
                size_t size)
{
        return 0;
}
#endif

/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
                enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
                unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        __dma_single_cpu_to_dev(cpu_addr, size, dir);

        return virt_to_dma(dev, cpu_addr);
}
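
/*
 * Illustrative sketch of the streaming ownership hand-off (the helper
 * and names are hypothetical):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	start_device_tx(dev, dma, len);
 *	... wait for the completion interrupt ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */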

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        __dma_page_cpu_to_dev(page, offset, size, dir);

        return page_to_dma(dev, page) + offset;
}
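
/*
 * Illustrative sketch: mapping 512 bytes starting 64 bytes into a page
 * (the values are arbitrary):
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 64, 512, DMA_FROM_DEVICE);
 *	... device DMAs into the region ...
 *	dma_unmap_page(dev, dma, 512, DMA_FROM_DEVICE);
 */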

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        __dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
                size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA
 * mapping, you must call this function before doing so. When you
 * next give the DMA address back to the card, you must first call
 * dma_sync_single_range_for_device(); the device then owns the
 * buffer again.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
                return;

        __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
                return;

        __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
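
/*
 * Illustrative sketch: keeping one mapping alive across transfers and
 * using the sync calls to pass ownership back and forth (the helper
 * name is hypothetical):
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	process_rx_data(buf, len);	    ... CPU may access the buffer ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... the device may now DMA into the buffer again ...
 */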

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
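
/*
 * Illustrative sketch: mapping a two-entry scatterlist (buffers are
 * hypothetical; note that dma_map_sg() may coalesce entries, so iterate
 * over its return value, while dma_unmap_sg() takes the original count):
 *
 *	struct scatterlist sg[2];
 *	int i, nents;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf0, len0);
 *	sg_set_buf(&sg[1], buf1, len1);
 *	nents = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
 *	for (i = 0; i < nents; i++)
 *		program_desc(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
 *	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
 */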

#endif /* __KERNEL__ */
#endif