/* Fallback functions when the main IOMMU code is not compiled in. This
   code is roughly equivalent to i386. */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/iommu.h>
#include <asm/dma.h>

static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	if (hwdev && !dma_capable(hwdev, bus, size)) {
		if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
			printk(KERN_ERR
			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
				name, (long long)bus, size,
				(long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
}

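/*
 * Example (hypothetical device): dma_capable() is essentially the range
 * check "bus + size - 1 <= *hwdev->dma_mask".  For an ISA-style device
 * with a 24-bit mask, DMA_BIT_MASK(24) == 0xffffff:
 *
 *	bus = 0x00fff000, size = 0x1000  ->  last byte 0xffffff, fits
 *	bus = 0x01000000, size = 0x1000  ->  above the mask, rejected
 *
 * In the second case check_addr() returns 0 and the mapping fails.
 */
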
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	dma_addr_t bus = page_to_phys(page) + offset;
	WARN_ON(size == 0);
	if (!check_addr("map_single", dev, bus, size))
		return DMA_ERROR_CODE;
	flush_write_buffers();
	return bus;
}

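/*
 * Example (hypothetical driver code): streaming mappings made through
 * the generic DMA API land in nommu_map_page() when these ops are
 * installed; the returned handle is simply the buffer's physical
 * address:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... hand "handle" to the device, wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
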
/* Map a set of buffers described by a scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the above
 * nommu_map_page() interface.  Here the scatter-gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for nommu_map_page()
 * are the same here.
 */
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}
	flush_write_buffers();
	return nents;
}

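/*
 * Example (hypothetical driver code): drivers reach nommu_map_sg()
 * through dma_map_sg() and read the per-element handles back with the
 * sg_dma_{address,length} accessors mentioned above; program_desc()
 * stands in for whatever descriptor setup the device needs:
 *
 *	struct scatterlist *s;
 *	int i, n;
 *
 *	n = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (n == 0)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, n, i)
 *		program_desc(dev, sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */
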
static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

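/*
 * Example (hypothetical driver code): nommu_free_coherent() is the
 * counterpart of dma_generic_alloc_coherent() in the ops table below;
 * both are reached through the usual coherent-allocation API:
 *
 *	void *cpu;
 *	dma_addr_t handle;
 *
 *	cpu = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, size, cpu, handle);
 */
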
static void nommu_sync_single_for_device(struct device *dev,
			dma_addr_t addr, size_t size,
			enum dma_data_direction dir)
{
	flush_write_buffers();
}


static void nommu_sync_sg_for_device(struct device *dev,
			struct scatterlist *sg, int nelems,
			enum dma_data_direction dir)
{
	flush_write_buffers();
}

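/*
 * The two sync-for-device hooks above need nothing beyond
 * flush_write_buffers(): with no IOMMU and cache-coherent DMA there is
 * nothing to remap or invalidate, only CPU write buffers to drain
 * before the device reads memory (effectively a no-op on most x86
 * CPUs).
 */
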
struct dma_map_ops nommu_dma_ops = {
	.alloc			= dma_generic_alloc_coherent,
	.free			= nommu_free_coherent,
	.map_sg			= nommu_map_sg,
	.map_page		= nommu_map_page,
	.sync_single_for_device = nommu_sync_single_for_device,
	.sync_sg_for_device	= nommu_sync_sg_for_device,
	.is_phys		= 1,
};
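
/*
 * Example: in kernels of this vintage these ops are the boot-time
 * default; the x86 dma_ops pointer (arch/x86/kernel/pci-dma.c) starts
 * out as &nommu_dma_ops and is only swapped out when a real IOMMU or
 * swiotlb initializes:
 *
 *	struct dma_map_ops *dma_ops = &nommu_dma_ops;
 */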