#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>

#define dma_alloc_coherent(dev, size, handle, gfp)	\
	platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)

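/*
 * Illustrative sketch (not part of this header; "ring" and "ring_dma"
 * are hypothetical names): every dma_alloc_coherent() on ia64 has
 * GFP_DMA OR'd in, so the buffer comes from the DMA zone.  A driver
 * allocating a descriptor ring would simply write:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 */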
/* Coherent memory is cheap on ia64, so the noncoherent variants just use it. */
static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		      gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag);
}
#define dma_free_coherent	platform_dma_free_coherent
static inline void
dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
		     dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
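/*
 * Hypothetical teardown matching the allocation sketch above; since the
 * noncoherent calls alias the coherent ones here, either free routine
 * works:
 *
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */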
#define dma_map_single_attrs	platform_dma_map_single_attrs
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size, int dir)
{
	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
}
#define dma_map_sg_attrs	platform_dma_map_sg_attrs
static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, int dir)
{
	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
}
#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size, int dir)
{
	dma_unmap_single_attrs(dev, dma_addr, size, dir, NULL);
}
#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
				int nents, int dir)
{
	dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
}
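/*
 * Streaming-DMA sketch (hypothetical names, not from this file): map a
 * buffer for a device read, check for mapping failure, and unmap once
 * the transfer completes.
 *
 *	dma_addr_t buf_dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, buf_dma))
 *		return -EIO;
 *	... hand buf_dma to the device and wait for completion ...
 *	dma_unmap_single(dev, buf_dma, len, DMA_TO_DEVICE);
 */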
#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
#define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
#define dma_sync_single_for_device platform_dma_sync_single_for_device
#define dma_sync_sg_for_device	platform_dma_sync_sg_for_device
#define dma_mapping_error	platform_dma_mapping_error

#define dma_map_page(dev, pg, off, size, dir)				\
	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)			\
	dma_unmap_single(dev, dma_addr, size, dir)
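/*
 * Hypothetical use of the sync hooks around CPU access to a mapping the
 * device writes into; on ia64 these are cheap (the platform is
 * cache-coherent), but portable drivers must still make the calls:
 *
 *	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	... CPU inspects the data ...
 *	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
 */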

/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */

#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)
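/*
 * Note that the ranged variants above ignore "offset" and sync "size"
 * bytes from the start of the mapping; on a cache-coherent platform
 * like ia64, where the sync hooks act as ordering barriers rather than
 * cache flushes, the exact sub-range should not matter.
 */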

#define dma_supported		platform_dma_supported

static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
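/*
 * Typical probe-time use (sketch only; "pdev" is a hypothetical PCI
 * device): try a 64-bit mask first and fall back to 32 bits, giving up
 * only if neither is supported.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_64BIT_MASK) &&
 *	    dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -EIO;
 */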

extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we
	 * do need to ensure that dma_cache_sync() enforces order, hence the
	 * mb().
	 */
	mb();
}

#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

#endif /* _ASM_IA64_DMA_MAPPING_H */