/*
 * Copyright (C) 2004 IBM Corporation
 *
 * Implements the generic device DMA API for ppc64.  Handles
 * the PCI and VIO buses.
 */
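
/*
 * A minimal sketch of how a driver might use this API (dev, buf and
 * len stand for the caller's own device, buffer and length; error
 * handling is omitted):
 *
 *	dma_addr_t handle;
 *
 *	if (dma_set_mask(dev, 0xffffffffULL))	 * 32-bit DMA mask *
 *		return -EIO;
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... start the transfer and wait for the device to finish ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */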

#include <linux/device.h>
#include <linux/dma-mapping.h>
/* Include the busses we support */
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>

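/*
 * Look up the bus-specific dma_mapping_ops for a device: &pci_dma_ops
 * for PCI devices, &vio_dma_ops for VIO devices, NULL for any bus we
 * do not handle.
 */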
static struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return &pci_dma_ops;
#endif
#ifdef CONFIG_IBMVIO
	if (dev->bus == &vio_bus_type)
		return &vio_dma_ops;
#endif
	return NULL;
}

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		return dma_ops->dma_supported(dev, mask);
	BUG();
	return 0;
}
EXPORT_SYMBOL(dma_supported);

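/*
 * PCI devices delegate to pci_set_dma_mask(); VIO devices do not
 * support changing their DMA mask and return -EIO.
 */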
int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
#ifdef CONFIG_IBMVIO
	if (dev->bus == &vio_bus_type)
		return -EIO;
#endif /* CONFIG_IBMVIO */
	BUG();
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, unsigned int __nocast flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	BUG();
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		BUG();
}
EXPORT_SYMBOL(dma_free_coherent);

dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		return dma_ops->map_single(dev, cpu_addr, size, direction);
	BUG();
	return (dma_addr_t)0;
}
EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		dma_ops->unmap_single(dev, dma_addr, size, direction);
	else
		BUG();
}
EXPORT_SYMBOL(dma_unmap_single);

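/*
 * dma_map_page() is implemented in terms of the bus map_single
 * operation on the page's kernel virtual address, which assumes the
 * page is addressable via page_address() (true on ppc64, which has
 * no highmem).
 */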
dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		return dma_ops->map_single(dev,
				(page_address(page) + offset), size, direction);
	BUG();
	return (dma_addr_t)0;
}
EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		dma_ops->unmap_single(dev, dma_address, size, direction);
	else
		BUG();
}
EXPORT_SYMBOL(dma_unmap_page);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		return dma_ops->map_sg(dev, sg, nents, direction);
	BUG();
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		dma_ops->unmap_sg(dev, sg, nhwentries, direction);
	else
		BUG();
}
EXPORT_SYMBOL(dma_unmap_sg);