/* Fallback functions when the main IOMMU code is not compiled in. This
   code is roughly equivalent to i386. */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <asm/iommu.h>
#include <asm/processor.h>
#include <asm/dma.h>
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	if (hwdev && bus + size > *hwdev->dma_mask) {
		/* Only complain loudly for masks of at least 32 bits;
		   narrower masks overflow routinely. */
		if (*hwdev->dma_mask >= DMA_32BIT_MASK)
			printk(KERN_ERR
			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
				name, (long long)bus, size,
				(long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
}
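
/*
 * Worked example of the check above: a device with a 24-bit mask
 * (0x00ffffff) asked to map 0x2000 bytes at bus address 0x00fff000
 * fails, since 0x00fff000 + 0x2000 = 0x01001000 exceeds the mask;
 * and because the mask is below DMA_32BIT_MASK, the failure is silent.
 */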

static dma_addr_t
nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
	       int direction)
{
	/* Identity mapping: the bus address is just the physical address. */
	dma_addr_t bus = paddr;
	WARN_ON(size == 0);
	if (!check_addr("map_single", hwdev, bus, size))
		return bad_dma_address;
	flush_write_buffers();
	return bus;
}
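
/*
 * Illustrative sketch, not part of this file's interface: how a driver-
 * side mapping reaches nommu_map_single() once nommu_dma_ops is
 * installed. The helper and its arguments are hypothetical.
 */
static inline int example_map_buffer(struct device *dev, void *buf,
				     size_t len)
{
	dma_addr_t handle;

	/* Dispatches through dma_ops->map_single(), i.e. the function above. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (handle == bad_dma_address)
		return -EIO;

	/* ... point the device at "handle", wait for the DMA, then ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}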

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
 * above pci_map_single interface. Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length. They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
	       int nents, int direction)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));
		/* Identity mapping again: bus address == physical address. */
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}
	flush_write_buffers();
	return nents;
}

static void *
nommu_alloc_coherent(struct device *hwdev, size_t size,
		     dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long dma_mask;
	int node;
	struct page *page;

	if (hwdev->dma_mask == NULL)
		return NULL;

	/* Drop the caller's zone modifiers; the zone is chosen from the
	   device's mask below. Coherent memory must come back zeroed. */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
	gfp |= __GFP_ZERO;

	dma_mask = hwdev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = *(hwdev->dma_mask);

	if (dma_mask < DMA_24BIT_MASK)
		return NULL;

	node = dev_to_node(hwdev);

#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_32BIT_MASK)
		gfp |= GFP_DMA32;
#endif

	/* No alloc-free penalty for ISA devices */
	if (dma_mask == DMA_24BIT_MASK)
		gfp |= GFP_DMA;

again:
	page = alloc_pages_node(node, gfp, get_order(size));
	if (!page)
		return NULL;

	/* If the pages landed above the device's mask, retry once from
	   the ISA DMA zone before giving up. */
	if ((page_to_phys(page) + size > dma_mask) && !(gfp & GFP_DMA)) {
		free_pages((unsigned long)page_address(page), get_order(size));
		gfp |= GFP_DMA;
		goto again;
	}

	*dma_addr = page_to_phys(page);
	if (check_addr("alloc_coherent", hwdev, *dma_addr, size)) {
		flush_write_buffers();
		return page_address(page);
	}

	free_pages((unsigned long)page_address(page), get_order(size));

	return NULL;
}
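
/*
 * Illustrative sketch, hypothetical helper: a coherent alloc/free pair
 * through the generic wrappers, which reach nommu_alloc_coherent()
 * above once these ops are installed.
 */
static inline int example_coherent_roundtrip(struct device *dev, size_t size)
{
	dma_addr_t handle;
	void *ring;

	ring = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* "ring" is zeroed and "handle" is its physical address here. */

	dma_free_coherent(dev, size, ring, handle);
	return 0;
}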

struct dma_mapping_ops nommu_dma_ops = {
	.alloc_coherent = nommu_alloc_coherent,
	.map_single = nommu_map_single,
	.map_sg = nommu_map_sg,
	.is_phys = 1,
};

void __init no_iommu_init(void)
{
	if (dma_ops)
		return;

	force_iommu = 0; /* no HW IOMMU */
	dma_ops = &nommu_dma_ops;
}
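
/*
 * For reference, a simplified picture (paraphrased, not copied from the
 * x86 headers) of how the generic DMA API dispatches through the ops
 * installed above:
 *
 *	dma_map_single(dev, ptr, size, dir)
 *	    -> dma_ops->map_single(dev, virt_to_phys(ptr), size, dir)
 *	    == nommu_map_single(dev, virt_to_phys(ptr), size, dir)
 */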
Muli Ben-Yehuda17a941d2006-01-11 22:44:42 +0100143}