/* Fallback functions when the main IOMMU code is not compiled in. This
   code is roughly equivalent to i386. */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <asm/iommu.h>
#include <asm/processor.h>
#include <asm/dma.h>

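/*
 * Helper: check that a bus address range fits within the device's DMA
 * mask.  Returns 1 if the transfer can proceed, 0 on overflow; the
 * error is only logged when the mask is at least 32 bits wide, where
 * an overflow is unexpected.
 */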
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) {
		if (*hwdev->dma_mask >= DMA_32BIT_MASK)
			printk(KERN_ERR
			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
				name, (long long)bus, size,
				(long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
}

/* Without an IOMMU, "mapping" a page is simply its physical address. */
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	dma_addr_t bus = page_to_phys(page) + offset;
	WARN_ON(size == 0);
	if (!check_addr("map_single", dev, bus, size))
		return bad_dma_address;
	flush_write_buffers();
	return bus;
}

/* Single-buffer mapping, implemented on top of nommu_map_page(). */
static dma_addr_t nommu_map_single(struct device *hwdev, phys_addr_t paddr,
				   size_t size, int direction)
{
	return nommu_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
			      paddr & ~PAGE_MASK, size, direction, NULL);
}

/*
 * Map a set of buffers described by a scatterlist in streaming mode for
 * DMA.  This is the scatter-gather version of the nommu_map_single()
 * interface above.  Each scatter-gather list element is tagged with the
 * appropriate DMA address and length, which callers retrieve via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of DMA
 * address/length pairs than there are SG table elements (for example
 * via virtual mapping capabilities).  The routine returns the number of
 * addr/length pairs actually used, at most nents.
 *
 * Device ownership issues are the same as for the single mapping above.
 * (An illustrative caller-side sketch follows nommu_map_sg() below.)
 */
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
			int nents, int direction)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}
	flush_write_buffers();
	return nents;
}

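/*
 * Illustrative caller-side sketch (hypothetical driver code, not built
 * here): a driver maps a scatterlist through the generic DMA API, which
 * dispatches to nommu_map_sg() when nommu_dma_ops is installed.  The
 * function and variable names below are examples only.
 */
#if 0
static int example_map_buffers(struct device *dev, struct scatterlist *sglist,
			       int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	/* Returns the number of DMA address/length pairs used, 0 on failure. */
	mapped = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -EIO;

	for_each_sg(sglist, sg, mapped, i) {
		/* Program the device with sg_dma_address(sg) / sg_dma_len(sg). */
	}
	return mapped;
}
#endif
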
/* Coherent memory is just ordinary pages here; freeing is free_pages(). */
static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_addr)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

/* The fallback dma_mapping_ops used when no hardware IOMMU is present. */
struct dma_mapping_ops nommu_dma_ops = {
	.alloc_coherent = dma_generic_alloc_coherent,
	.free_coherent = nommu_free_coherent,
	.map_single = nommu_map_single,
	.map_sg = nommu_map_sg,
	.map_page = nommu_map_page,
	.is_phys = 1,
};

/* Install the fallback ops unless an IOMMU driver already set dma_ops. */
void __init no_iommu_init(void)
{
	if (dma_ops)
		return;

	force_iommu = 0; /* no HW IOMMU */
	dma_ops = &nommu_dma_ops;
}