/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc;
 * used by the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag);
extern void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle);

extern unsigned long get_dma_direct_offset(struct device *dev);

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
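
/*
 * Illustrative sketch only (not part of the API): the "allocate the space
 * normally" alternative mentioned above pairs an ordinary cacheable
 * allocation with explicit cache management around the transfer, roughly:
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	... CPU fills buf ...
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	(flush before the device reads)
 *	... device DMA from buf ...
 *
 * "buf" and "len" are made up for the example; drivers normally get this
 * behaviour through the dma_map_* wrappers below rather than by calling
 * __dma_sync() directly.
 */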

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_page)(struct device *dev,
				dma_addr_t dma_address, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*addr_needs_map)(struct device *dev, dma_addr_t addr,
				size_t size);
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size,
				enum dma_data_direction direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size,
				enum dma_data_direction direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				enum dma_data_direction direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				enum dma_data_direction direction);
#endif
};
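
/*
 * Illustrative sketch only, not an ops table that exists in the tree: a
 * minimal bus or platform implementation would fill in at least the
 * allocation and mapping hooks.  The "example_*" names are hypothetical;
 * the dma_direct_* helpers are the ones declared earlier in this file.
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.alloc_coherent	= dma_direct_alloc_coherent,
 *		.free_coherent	= dma_direct_free_coherent,
 *		.map_page	= example_map_page,
 *		.unmap_page	= example_unmap_page,
 *		.map_sg		= example_map_sg,
 *		.unmap_sg	= example_unmap_sg,
 *	};
 */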

/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_mapping_ops dma_iommu_ops;
#endif
extern struct dma_mapping_ops dma_direct_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL))
		return NULL;

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

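/*
 * Illustrative sketch only: platform or bus setup code typically installs
 * one of the generic ops tables on each device it creates, e.g. (a
 * hypothetical call site, with "dev" being the struct device just set up):
 *
 *	set_dma_ops(dev, &dma_direct_ops);
 *
 * after which get_dma_ops(dev) hands that table to the wrappers below.
 */
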
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

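/*
 * Illustrative sketch only (hypothetical driver probe code): callers usually
 * try a wide mask first and fall back to 32 bits, giving up if neither is
 * supported:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
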
/*
 * map_/unmap_single actually call through to map/unmap_page now that all the
 * dma_mapping_ops have been converted over. We just have to get the page and
 * offset to pass through to map_page
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr % PAGE_SIZE, size,
				 direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

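/*
 * Illustrative sketch only (hypothetical driver code, not part of this API):
 * a coherent buffer shared long-term with a device is obtained and released
 * like this, with "ring", "ring_dma" and "RING_BYTES" made up for the
 * example:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... give ring_dma to the device, use ring from the CPU side ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
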
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}

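/*
 * Illustrative sketch only (hypothetical driver code, not part of this API):
 * a one-shot streaming mapping of a driver-owned buffer follows the usual
 * Linux DMA-API pattern, with "buf" and "len" made up for the example:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... program the device with "handle" and let it transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * dma_mapping_error() is defined further down in this file.
 */
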
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
						   size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(dev, dma_handle,
						      0, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(dev, dma_handle,
						   offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
						      size, direction);
}
#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}
#endif

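/*
 * Illustrative sketch only: for a mapping that stays live while ownership
 * alternates between CPU and device, a hypothetical driver brackets its CPU
 * accesses with the sync helpers above, e.g. for a receive buffer:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU inspects the data the device wrote ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
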
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	if (ops->addr_needs_map && ops->addr_needs_map(dev, addr, size))
		return 0;

	if (!dev->dma_mask)
		return 0;

	return addr + size <= *dev->dma_mask;
}

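/*
 * Illustrative sketch only: bounce-buffering code (such as the swiotlb glue
 * pulled in by the include above) can use dma_capable() to decide whether a
 * buffer is directly reachable by the device, roughly:
 *
 *	if (!dma_capable(dev, addr, size))
 *		... stage the transfer through a bounce buffer instead ...
 */
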
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

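/*
 * Illustrative sketch only: a hypothetical driver carving several DMA
 * buffers out of one allocation could use the value above to keep them from
 * sharing a cache line, e.g.
 *
 *	stride = ALIGN(buf_len, dma_get_cache_alignment());
 *
 * where "stride" and "buf_len" are made up for the example.
 */
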
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */