/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc,
 * covering the PCI and VIO busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping. These allocate/free a region of uncached mapped
 * memory space for use with DMA devices. Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);

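/*
 * Illustrative usage sketch: on a non-snooping core a driver can either
 * use the uncached allocator or allocate normally and maintain the
 * cache by hand.  "buf", "len" and "handle" are hypothetical driver
 * variables.
 *
 *	dma_addr_t handle;
 *	void *buf = __dma_alloc_coherent(len, &handle, GFP_KERNEL);
 *
 * or, with ordinary memory plus explicit maintenance before a transfer:
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);
 */
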
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without a mask can take 32 bit addresses */
	return 0xfffffffful;
}

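/*
 * Illustrative use: iommu or bus code can bound the bus addresses it
 * hands out by the device's reachable range, e.g.
 *
 *	u64 limit = device_to_mask(dev);
 *
 * and then refuse or relocate any mapping that would land above "limit".
 */
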
/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_page)(struct device *dev,
				dma_addr_t dma_address, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
};

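/*
 * Illustrative sketch of a bus supplying its own operations; all
 * "mybus_*" names are hypothetical.  The table is attached to each
 * device with set_dma_ops() (defined below):
 *
 *	static struct dma_mapping_ops mybus_dma_ops = {
 *		.alloc_coherent	= mybus_alloc_coherent,
 *		.free_coherent	= mybus_free_coherent,
 *		.map_sg		= mybus_map_sg,
 *		.unmap_sg	= mybus_unmap_sg,
 *		.map_page	= mybus_map_page,
 *		.unmap_page	= mybus_unmap_page,
 *	};
 *
 *	set_dma_ops(&mybus_device->dev, &mybus_dma_ops);
 */
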
/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_mapping_ops dma_iommu_ops;
#endif
extern struct dma_mapping_ops dma_direct_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */

	if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL) {
#ifdef CONFIG_PPC64
		return NULL;
#else
		/* Use default on 32-bit if dma_ops is not set up */
		/* TODO: Long term, we should fix drivers so that dev and
		 * archdata dma_ops are set up for all buses.
		 */
		return &dma_direct_ops;
#endif
	}

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

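/*
 * Illustrative sketch of the usual probe-time negotiation, assuming the
 * DMA_64BIT_MASK/DMA_32BIT_MASK constants of this era; "pdev" is a
 * hypothetical PCI device:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_64BIT_MASK) &&
 *	    dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -EIO;
 */
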
/*
 * map_/unmap_single actually call through to map/unmap_page now that all the
 * dma_mapping_ops have been converted over. We just have to get the page and
 * offset to pass through to map_page.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr % PAGE_SIZE, size,
				 direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}

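/*
 * Illustrative sketch, assuming the DEFINE_DMA_ATTRS()/dma_set_attr()
 * helpers from <linux/dma-attrs.h>: a driver that tolerates weakly
 * ordered DMA could pass that hint through the _attrs variants:
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
 *	nmapped = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, &attrs);
 */
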
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

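/*
 * Illustrative sketch: a long-lived descriptor ring would normally use
 * the coherent allocator.  "ring" and "ring_size" are hypothetical:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, ring_size, &ring_dma,
 *					GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, ring_size, ring, ring_dma);
 */
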
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}

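/*
 * Illustrative sketch of the classic streaming pattern: map a buffer
 * for one transfer, unmap when the device is done.  "buf" and "len"
 * are hypothetical:
 *
 *	dma_addr_t busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...point the device at "busaddr" and start the transfer...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */
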
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

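/*
 * Illustrative sketch: if the CPU must inspect a still-mapped buffer
 * between transfers, it brackets the access with the two sync calls:
 *
 *	dma_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
 *	...CPU reads the buffer...
 *	dma_sync_single_for_device(dev, busaddr, len, DMA_FROM_DEVICE);
 */
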
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

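/*
 * Illustrative sketch: every streaming mapping should be checked
 * before the address is handed to hardware:
 *
 *	dma_addr_t busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, busaddr))
 *		return -ENOMEM;
 */
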
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

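/*
 * Illustrative use, assuming the ALIGN() helper from <linux/kernel.h>:
 * padding a streaming DMA buffer to this value keeps it from sharing
 * a cache line with unrelated data on non-coherent platforms:
 *
 *	buf = kmalloc(ALIGN(len, dma_get_cache_alignment()), GFP_KERNEL);
 */
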
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */