/* Fallback functions when the main IOMMU code is not compiled in. This
   code is roughly equivalent to i386. */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <asm/iommu.h>
#include <asm/processor.h>
#include <asm/dma.h>

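/*
 * Check that a bus address plus transfer size fits within the device's
 * DMA mask. Without an IOMMU an out-of-range buffer cannot be remapped,
 * so the mapping must simply fail. Returns 1 if the address is usable,
 * 0 otherwise; the warning is only logged for devices with at least a
 * 32-bit mask, since overflows are unsurprising on more limited devices.
 */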
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) {
		if (*hwdev->dma_mask >= DMA_32BIT_MASK)
			printk(KERN_ERR
			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
				name, (long long)bus, size,
				(long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
}

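/*
 * "Map" a page for DMA: with no IOMMU the bus address is just the page's
 * physical address plus the offset. All that remains is the range check
 * against the device's DMA mask and a write-buffer flush so the device
 * observes fully written data.
 */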
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	dma_addr_t bus = page_to_phys(page) + offset;
	WARN_ON(size == 0);
	if (!check_addr("map_single", dev, bus, size))
		return bad_dma_address;
	flush_write_buffers();
	return bus;
}

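/*
 * Single-buffer mapping is expressed in terms of nommu_map_page() by
 * splitting the physical address into its page and in-page offset.
 */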
static dma_addr_t nommu_map_single(struct device *hwdev, phys_addr_t paddr,
				   size_t size, int direction)
{
	return nommu_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
			      paddr & ~PAGE_MASK, size, direction, NULL);
}

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
			int nents, int direction)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}
	flush_write_buffers();
	return nents;
}

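/*
 * Free a coherent buffer. dma_generic_alloc_coherent() hands out plain
 * pages, so there is nothing to unmap; just give the pages back.
 */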
static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_addr)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

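/*
 * The nommu implementation of the DMA mapping operations. is_phys = 1
 * marks these ops as a direct physical mapping with no remapping layer.
 */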
struct dma_mapping_ops nommu_dma_ops = {
	.alloc_coherent = dma_generic_alloc_coherent,
	.free_coherent = nommu_free_coherent,
	.map_single = nommu_map_single,
	.map_sg = nommu_map_sg,
	.map_page = nommu_map_page,
	.is_phys = 1,
};

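/*
 * Fallback initialisation: if no IOMMU driver has installed its own
 * dma_ops by this point, fall back to the direct-mapping operations
 * above. Forcing an IOMMU makes no sense without hardware, so clear it.
 */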
void __init no_iommu_init(void)
{
	if (dma_ops)
		return;

	force_iommu = 0; /* no HW IOMMU */
	dma_ops = &nommu_dma_ops;
}