/* Fallback functions when the main IOMMU code is not compiled in. This
   code is roughly equivalent to i386. */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/module.h>	/* EXPORT_SYMBOL */
#include <asm/proto.h>
#include <asm/processor.h>

int iommu_merge = 0;
EXPORT_SYMBOL(iommu_merge);

dma_addr_t bad_dma_address;
EXPORT_SYMBOL(bad_dma_address);

int iommu_bio_merge = 0;
EXPORT_SYMBOL(iommu_bio_merge);

int iommu_sac_force = 0;
EXPORT_SYMBOL(iommu_sac_force);

/*
 * Dummy IOMMU functions.
 */

void *dma_alloc_coherent(struct device *hwdev, size_t size,
			 dma_addr_t *dma_handle, unsigned gfp)
{
	void *ret;
	u64 mask;
	int order = get_order(size);

	if (hwdev)
		mask = hwdev->coherent_dma_mask & *hwdev->dma_mask;
	else
		mask = 0xffffffff;
	for (;;) {
		ret = (void *)__get_free_pages(gfp, order);
		if (ret == NULL)
			return NULL;
		*dma_handle = virt_to_bus(ret);
		if ((*dma_handle & ~mask) == 0)
			break;
		/*
		 * The pages landed above the device's DMA mask: free
		 * them and retry once from the low 16MB GFP_DMA zone.
		 */
		free_pages((unsigned long)ret, order);
		if (gfp & GFP_DMA)
			return NULL;
		gfp |= GFP_DMA;
	}

	memset(ret, 0, size);
	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *hwdev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
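
/*
 * Example (not built): a minimal sketch of how a driver pairs the two
 * calls above.  The device pointer and the one-page "ring" buffer are
 * hypothetical, not part of this file's API.
 */
#if 0
static void *ring;
static dma_addr_t ring_dma;

static int example_setup_ring(struct device *dev)
{
	/* Returns a zeroed, coherent buffer and fills in its bus address. */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (ring == NULL)
		return -ENOMEM;
	/* ... program ring_dma into the device ... */
	return 0;
}

static void example_teardown_ring(struct device *dev)
{
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
}
#endif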

int dma_supported(struct device *hwdev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must sit in a tighter range
	 * than GFP_DMA provides.
	 * RED-PEN: this won't work for pci_map_single.  The caller has
	 * to use GFP_DMA in the first place.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}
EXPORT_SYMBOL(dma_supported);
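
/*
 * Example (not built): dma_supported() is the check consulted by
 * dma_set_mask() before it accepts a driver's mask.  A sketch with a
 * hypothetical probe routine; on this nommu build any mask covering at
 * least 24 bits is accepted.
 */
#if 0
static int example_probe(struct device *dev)
{
	/* 0xffffffff >= 0x00ffffff, so a 32-bit mask succeeds here. */
	if (dma_set_mask(dev, 0xffffffffULL))
		return -EIO;
	return 0;
}
#endif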

int dma_get_cache_alignment(void)
{
	return boot_cpu_data.x86_clflush_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);
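
/*
 * Example (not built): a sketch of rounding a DMA buffer length up to
 * the cache line size reported above, so the buffer never shares a
 * cache line with unrelated data.  ALIGN() is the standard kernel
 * macro; "len" is hypothetical.
 */
#if 0
static size_t example_dma_buf_size(size_t len)
{
	return ALIGN(len, dma_get_cache_alignment());
}
#endif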

static int __init check_ram(void)
{
	/* end_pfn at or above the 4GB boundary means some RAM is not
	   reachable through a 32-bit bus address. */
	if (end_pfn >= 0xffffffff >> PAGE_SHIFT) {
		printk(
		KERN_ERR "WARNING: more than 4GB of memory but IOMMU not compiled in.\n"
		KERN_ERR "WARNING: 32bit PCI may malfunction.\n");
	}
	return 0;
}
__initcall(check_ram);