/*
 * Copyright (C) 2004 IBM Corporation
 *
 * Implements the generic device dma API for ppc64. Handles
 * the pci, vio and ibmebus busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
/* Include the busses we support */
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/ibmebus.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>

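/*
 * Look up the struct dma_mapping_ops that implements the DMA API for
 * this device's bus.  Returns NULL if the device sits on a bus we do
 * not handle here.
 */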
static struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return &pci_dma_ops;
#endif
#ifdef CONFIG_IBMVIO
	if (dev->bus == &vio_bus_type)
		return &vio_dma_ops;
#endif
#ifdef CONFIG_IBMEBUS
	if (dev->bus == &ibmebus_bus_type)
		return &ibmebus_dma_ops;
#endif
	return NULL;
}

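/*
 * Ask the bus-specific implementation whether the device can drive
 * DMA to every address covered by the given mask.
 */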
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

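/*
 * Set the device's DMA addressing mask.  Only PCI devices can change
 * their mask here; vio and ibmebus devices refuse with -EIO, and a
 * device on any other bus is a bug.
 */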
int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
#ifdef CONFIG_IBMVIO
	if (dev->bus == &vio_bus_type)
		return -EIO;
#endif /* CONFIG_IBMVIO */
#ifdef CONFIG_IBMEBUS
	if (dev->bus == &ibmebus_bus_type)
		return -EIO;
#endif
	BUG();
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

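/*
 * Allocate a coherent DMA buffer, returning its kernel-virtual
 * address and storing the bus address in *dma_handle.
 */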
void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}
EXPORT_SYMBOL(dma_alloc_coherent);

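/*
 * Free a buffer obtained from dma_alloc_coherent(); size and
 * dma_handle must match the original allocation.
 */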
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
EXPORT_SYMBOL(dma_free_coherent);

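/*
 * Map a kernel-virtual buffer for streaming DMA and return its bus
 * address.  A typical caller pattern (illustrative, not from this
 * file) looks like:
 *
 *	dma_addr_t busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... hand busaddr to the hardware ...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */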
dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_single(dev, cpu_addr, size, direction);
}
EXPORT_SYMBOL(dma_map_single);

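/*
 * Tear down a mapping created by dma_map_single().
 */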
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_single(dev, dma_addr, size, direction);
}
EXPORT_SYMBOL(dma_unmap_single);

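/*
 * Map part of a page for streaming DMA.  dma_mapping_ops has no
 * separate map_page hook, so this reuses map_single() on the page's
 * kernel address; that works because ppc64 has no highmem, so
 * page_address() is always valid.
 */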
dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_single(dev, page_address(page) + offset, size,
			direction);
}
EXPORT_SYMBOL(dma_map_page);

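/*
 * Tear down a mapping created by dma_map_page(); uses unmap_single()
 * to mirror the map_single() call in dma_map_page() above.
 */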
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_single(dev, dma_address, size, direction);
}
EXPORT_SYMBOL(dma_unmap_page);

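/*
 * Map a scatterlist of nents entries for streaming DMA.  Per the DMA
 * API, the return value is the number of DMA segments actually
 * mapped, which may be fewer than nents if the implementation
 * coalesces adjacent entries.
 */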
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_sg(dev, sg, nents, direction);
}
EXPORT_SYMBOL(dma_map_sg);

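/*
 * Unmap a scatterlist previously mapped with dma_map_sg().
 */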
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}
EXPORT_SYMBOL(dma_unmap_sg);