/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef _BLACKFIN_DMA_MAPPING_H
#define _BLACKFIN_DMA_MAPPING_H

#include <asm/cacheflush.h>
struct scatterlist;

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		       dma_addr_t dma_handle);
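
/*
 * Usage sketch (illustration only, not part of this header): a driver
 * would typically grab a coherent descriptor ring in its probe path.
 * The names below (priv, RING_BYTES) are hypothetical.
 *
 *	priv->ring_cpu = dma_alloc_coherent(dev, RING_BYTES,
 *					    &priv->ring_dma, GFP_KERNEL);
 *	if (!priv->ring_cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, priv->ring_cpu, priv->ring_dma);
 */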

/*
 * Now for the API extensions over the pci_ one
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_supported(d, m) (1)
#define dma_get_cache_alignment() (32)
#define dma_is_consistent(d, h) (1)

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
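
/*
 * Usage sketch (illustration only): a probe routine would normally
 * declare the device's addressing capability before doing any mapping:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * DMA_BIT_MASK() comes from <linux/dma-mapping.h>; "pdev" stands in for
 * whatever bus device the driver is bound to.
 */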

/*
 * Streaming mappings on Blackfin are just the buffer address plus a
 * cache sync, so creating one can never fail.
 */
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

extern void
__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);

static inline void
__dma_sync_inline(dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_NONE:
		BUG();
	case DMA_TO_DEVICE:		/* writeback only */
		flush_dcache_range(addr, addr + size);
		break;
	case DMA_FROM_DEVICE:		/* invalidate only */
	case DMA_BIDIRECTIONAL:		/* flush and invalidate */
		/* Blackfin has no dedicated invalidate (it includes a flush) */
		invalidate_dcache_range(addr, addr + size);
		break;
	}
}

/*
 * When the direction is a compile-time constant, the switch above folds
 * down to a single cache call and is worth inlining; otherwise call the
 * out-of-line __dma_sync() to keep callers small.
 */
static inline void
_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (__builtin_constant_p(dir))
		__dma_sync_inline(addr, size, dir);
	else
		__dma_sync(addr, size, dir);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either dma_unmap_single() or dma_sync_single_for_cpu() is
 * performed.
 */
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	_dma_sync((dma_addr_t)ptr, size, dir);
	return (dma_addr_t)ptr;
}
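
/*
 * Usage sketch (illustration only): mapping a transmit buffer before
 * handing it to a device.  "skb" and "my_hw_start_tx" are hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	my_hw_start_tx(handle, skb->len);
 *	...
 *	dma_unmap_single(dev, handle, skb->len, DMA_TO_DEVICE);
 */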

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided for in a previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 *
 * On Blackfin the mapping itself is only a cache operation, so there is
 * nothing to tear down here beyond sanity-checking the direction.
 */
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
	       enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_addr, size, dir);
}

/*
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above dma_map_single() interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single() are
 * the same here.
 */
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir);
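
/*
 * Usage sketch (illustration only): walking a mapped scatterlist and
 * feeding each segment to hypothetical descriptor-setup code.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	for_each_sg(sglist, s, count, i)
 *		my_hw_add_desc(sg_dma_address(s), sg_dma_length(s));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */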

/*
 * Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	     int nhwentries, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir)
{
	_dma_sync(handle + offset, size, dir);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
			enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
			   enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
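
/*
 * Usage sketch (illustration only): a long-lived receive buffer that the
 * CPU inspects between transfers.  "my_hw_rx_done", "rx_len" and
 * "process" are hypothetical.
 *
 *	handle = dma_map_single(dev, buf, BUF_SIZE, DMA_FROM_DEVICE);
 *	while (my_hw_rx_done()) {
 *		dma_sync_single_for_cpu(dev, handle, rx_len, DMA_FROM_DEVICE);
 *		process(buf, rx_len);
 *		dma_sync_single_for_device(dev, handle, BUF_SIZE,
 *					   DMA_FROM_DEVICE);
 *	}
 *	dma_unmap_single(dev, handle, BUF_SIZE, DMA_FROM_DEVICE);
 */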

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

extern void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nents, enum dma_data_direction dir);

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	_dma_sync((dma_addr_t)vaddr, size, dir);
}
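
/*
 * Usage sketch (illustration only): on Blackfin the noncoherent
 * allocator simply aliases the coherent one (see the #defines above),
 * but portable code still pairs it with dma_cache_sync() after CPU
 * writes.  "my_hw_kick" is hypothetical.
 *
 *	buf = dma_alloc_noncoherent(dev, BUF_SIZE, &handle, GFP_KERNEL);
 *	memset(buf, 0, BUF_SIZE);
 *	dma_cache_sync(dev, buf, BUF_SIZE, DMA_TO_DEVICE);
 *	my_hw_kick(handle);
 */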

#endif /* _BLACKFIN_DMA_MAPPING_H */