/* Glue code to lib/swiotlb.c */

#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include <asm/iommu.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>

int swiotlb __read_mostly;

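/*
 * Thin wrapper so the ->map_single hook can be fed a physical address:
 * convert it back to a kernel virtual address and let the generic
 * swiotlb code decide whether a bounce buffer is needed.
 */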
static dma_addr_t
swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
			int direction)
{
	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
}

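/*
 * Prefer the generic coherent allocator; fall back to the swiotlb pool
 * only when it cannot satisfy the request.
 */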
static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
					dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;

	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
	if (vaddr)
		return vaddr;

	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
}

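/*
 * DMA operations vector that routes the x86 DMA API through the
 * software IO-TLB (bounce buffering) implementation in lib/swiotlb.c.
 */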
struct dma_mapping_ops swiotlb_dma_ops = {
	.mapping_error = swiotlb_dma_mapping_error,
	.alloc_coherent = x86_swiotlb_alloc_coherent,
	.free_coherent = swiotlb_free_coherent,
	.map_single = swiotlb_map_single_phys,
	.unmap_single = swiotlb_unmap_single,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.map_sg = swiotlb_map_sg,
	.unmap_sg = swiotlb_unmap_sg,
	.dma_supported = NULL,
};

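/*
 * Decide at boot time whether the software IO-TLB is needed: enable it
 * when no hardware IOMMU was detected and memory extends beyond the
 * 32-bit DMA limit, or when swiotlb use is forced from the kernel
 * command line.
 */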
void __init pci_swiotlb_init(void)
{
	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
	if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
		swiotlb = 1;
	if (swiotlb_force)
		swiotlb = 1;
	if (swiotlb) {
		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
		swiotlb_init();
		dma_ops = &swiotlb_dma_ops;
	}
}