/*
 * DMA mapping support for platforms lacking IOMMUs.
 *
 * Copyright (C) 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/dma-mapping.h>
#include <linux/io.h>

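/*
 * Without an IOMMU there is no translation to set up: the device sees
 * physical addresses directly, so mapping a page is just page_to_phys()
 * plus the offset.  The only real work is the cache writeback/invalidate
 * needed on non-coherent CPUs, done via dma_cache_sync().
 */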
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	dma_addr_t addr = page_to_phys(page) + offset;

	WARN_ON(size == 0);
	dma_cache_sync(dev, page_address(page) + offset, size, dir);

	return addr;
}

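/*
 * Scatterlist mapping is the per-entry analogue of nommu_map_page():
 * each segment's DMA address is simply its physical address, written
 * back into the list after the segment's cache lines have been synced.
 */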
static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));

		dma_cache_sync(dev, sg_virt(s), s->length, dir);

		s->dma_address = sg_phys(s);
		s->dma_length = s->length;
	}

	return nents;
}

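/*
 * The sync operations only matter when CPU caches are not coherent
 * with DMA; on coherent configurations they are compiled out entirely
 * and the corresponding ops below are left unset.
 */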
#ifdef CONFIG_DMA_NONCOHERENT
static void nommu_sync_single(struct device *dev, dma_addr_t addr,
			      size_t size, enum dma_data_direction dir)
{
	dma_cache_sync(dev, phys_to_virt(addr), size, dir);
}

static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
			  int nelems, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nelems, i)
		dma_cache_sync(dev, sg_virt(s), s->length, dir);
}
#endif

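/*
 * Coherent allocations are deferred to the generic helpers; everything
 * else maps 1:1 onto physical memory.  is_phys flags these ops to the
 * core as operating directly on physical addresses.
 */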
struct dma_map_ops nommu_dma_ops = {
	.alloc_coherent		= dma_generic_alloc_coherent,
	.free_coherent		= dma_generic_free_coherent,
	.map_page		= nommu_map_page,
	.map_sg			= nommu_map_sg,
#ifdef CONFIG_DMA_NONCOHERENT
	.sync_single_for_device	= nommu_sync_single,
	.sync_sg_for_device	= nommu_sync_sg,
#endif
	.is_phys		= 1,
};

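/*
 * Installed as the boot-time fallback: a platform that registers its
 * own IOMMU-aware dma_ops beforehand takes precedence, and this
 * becomes a no-op.
 */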
void __init no_iommu_init(void)
{
	if (dma_ops)
		return;
	dma_ops = &nommu_dma_ops;
}