/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc,
 * covering the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
				 size_t size, int direction);

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */
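
/*
 * Illustrative sketch (not part of this header): on a non-coherent part a
 * driver either allocates a consistent buffer up front or maps a normally
 * allocated buffer per transfer and lets the streaming API do the cache
 * maintenance.  "my_dev" and "my_buf" are made-up names for the example.
 *
 * Consistent (uncached) buffer, no explicit sync needed:
 *
 *	my_buf = dma_alloc_coherent(my_dev, PAGE_SIZE, &my_handle, GFP_KERNEL);
 *
 * Streaming mapping of an ordinary buffer, cache handled at map/unmap time:
 *
 *	my_handle = dma_map_single(my_dev, my_buf, PAGE_SIZE, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(my_dev, my_handle, PAGE_SIZE, DMA_TO_DEVICE);
 */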

#ifdef CONFIG_PPC64
/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
};
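
/*
 * Illustrative sketch (not from this header): a bus or platform that needs
 * its own address translation could provide a table like the one below and
 * attach it through dev->archdata.dma_ops.  The "example_*" functions and
 * "my_dev" are hypothetical.
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.alloc_coherent	= example_alloc_coherent,
 *		.free_coherent	= example_free_coherent,
 *		.map_single	= example_map_single,
 *		.unmap_single	= example_unmap_single,
 *		.map_sg		= example_map_sg,
 *		.unmap_sg	= example_unmap_sg,
 *		.dma_supported	= example_dma_supported,
 *	};
 *
 *	my_dev->archdata.dma_ops = &example_dma_ops;
 */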

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
		return NULL;
	return dev->archdata.dma_ops;
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
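
/*
 * Illustrative sketch (not part of this header): a driver would typically set
 * its addressing capability from its probe routine and give up if the
 * platform cannot satisfy it.  "my_dev" is a made-up device pointer.
 *
 *	if (dma_set_mask(my_dev, DMA_32BIT_MASK))
 *		return -EIO;
 */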

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, cpu_addr, size, direction);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_addr, size, direction);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, page_address(page) + offset, size,
				   direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_address, size, direction);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}


/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

extern unsigned long dma_direct_offset;

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle,
				       gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
	}

	return nents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	/* We don't do anything here. */
}

#endif /* CONFIG_PPC64 */
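
/*
 * Illustrative sketch (not part of this header): mapping a one-entry
 * scatterlist for a device-bound transfer.  "my_dev" and "my_buf" are
 * made-up names and error handling is kept minimal.
 *
 *	struct scatterlist sg[1];
 *	int mapped;
 *
 *	sg_init_table(sg, 1);
 *	sg_set_buf(&sg[0], my_buf, PAGE_SIZE);
 *	mapped = dma_map_sg(my_dev, sg, 1, DMA_TO_DEVICE);
 *	if (mapped == 0)
 *		return -EIO;
 *	...
 *	dma_unmap_sg(my_dev, sg, 1, DMA_TO_DEVICE);
 */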

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}
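
/*
 * Illustrative sketch (not part of this header): streaming mappings should be
 * checked with dma_mapping_error() before the handle is handed to hardware,
 * since iommu-backed ops can fail.  "my_dev" and "my_buf" are made-up names.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(my_dev, my_buf, PAGE_SIZE, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 */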

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}
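
/*
 * Illustrative sketch (not part of this header): a driver carving its own
 * descriptors out of one buffer might round each object up to the reported
 * alignment so DMA data never shares a cache line with a neighbour.
 * "struct my_desc" is hypothetical.
 *
 *	size_t sz = ALIGN(sizeof(struct my_desc), dma_get_cache_alignment());
 */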

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */