/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef _BLACKFIN_DMA_MAPPING_H
#define _BLACKFIN_DMA_MAPPING_H

#include <asm/cacheflush.h>
struct scatterlist;

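/*
 * Coherent allocations.  Blackfin has no MMU, so these are expected to
 * be satisfied from memory the CPU does not cache; the returned
 * *dma_handle can then be handed to the device as-is.
 */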
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		       dma_addr_t dma_handle);

/*
 * API extensions over the pci_* DMA API
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_supported(d, m)		(1)
#define dma_get_cache_alignment()	(32)
#define dma_is_consistent(d, h)		(1)
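/*
 * The trivial definitions above reflect the hardware: DMA can reach
 * all of memory (so any mask is supported), and the L1 data cache line
 * size is 32 bytes.
 */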

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

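/*
 * With a 1:1 mapping between bus and physical addresses, a streaming
 * mapping can never fail, so no error is ever reported.
 */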
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

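/*
 * Cache maintenance for streaming DMA.  When the direction is a
 * compile-time constant (the common case) the switch below folds down
 * to a single flush or invalidate call; otherwise the decision is made
 * at run time by the out-of-line __dma_sync().
 */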
extern void
__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);
static inline void
_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (!__builtin_constant_p(dir)) {
		__dma_sync(addr, size, dir);
		return;
	}

	switch (dir) {
	case DMA_NONE:
		BUG();
	case DMA_TO_DEVICE:		/* writeback only */
		flush_dcache_range(addr, addr + size);
		break;
	case DMA_FROM_DEVICE:		/* invalidate only */
	case DMA_BIDIRECTIONAL:		/* flush and invalidate */
		/* Blackfin has no dedicated invalidate (it includes a flush) */
		invalidate_dcache_range(addr, addr + size);
		break;
	}
}

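/*
 * Streaming mappings.  Without an MMU or IOMMU, a kernel virtual
 * address is also the bus address, so mapping reduces to the cache
 * maintenance above plus a cast.  A typical (hypothetical) driver
 * sequence:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...start the transfer against "handle" and wait for it...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */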
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	_dma_sync((dma_addr_t)ptr, size, dir);
	return (dma_addr_t) ptr;
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

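/*
 * Unmapping needs no cache work: the buffer was made consistent when
 * it was mapped (or by the sync_*_for_device calls), so only the
 * direction argument is sanity-checked here.
 */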
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
	       enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_addr, size, dir);
}

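/*
 * Scatter/gather.  Mapping must walk the list and sync each entry, so
 * dma_map_sg() lives out of line in the arch DMA code; unmapping, as
 * above, has nothing left to do.
 */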
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir);

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	     int nhwentries, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

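/*
 * Ownership transfers for streaming mappings.  Handing a buffer back
 * to the CPU (*_for_cpu) requires no cache operation, since its lines
 * were flushed or invalidated when the device took ownership; handing
 * it to the device (*_for_device) repeats the maintenance done at map
 * time.
 */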
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir)
{
	_dma_sync(handle + offset, size, dir);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
			enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
			   enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

extern void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nents, enum dma_data_direction dir);

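/*
 * dma_cache_sync() is meant for memory from dma_alloc_noncoherent();
 * here it simply performs the same cache maintenance as the streaming
 * API.
 */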
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	_dma_sync((dma_addr_t)vaddr, size, dir);
}

#endif /* _BLACKFIN_DMA_MAPPING_H */