#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);

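/*
 * Note (added commentary): each helper below dispatches through the
 * machine vector -- platform_dma_get_ops() comes from the machvec
 * layer (<asm/machvec.h>) and returns the struct dma_map_ops for the
 * current platform, e.g. the swiotlb implementation pulled in via
 * <asm/swiotlb.h> or an IOMMU-backed one selected through
 * set_iommu_machvec().  This lets one kernel image serve all IA-64
 * machine types.
 */
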
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *daddr, gfp_t gfp)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *caddr, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->free_coherent(dev, size, caddr, daddr);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
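
/*
 * Illustrative sketch (not part of this header; "my_dev" and the size
 * are hypothetical): a driver would typically allocate a coherent
 * buffer for a descriptor ring like so.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(my_dev, PAGE_SIZE, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(my_dev, PAGE_SIZE, ring, ring_dma);
 *
 * Note that dma_alloc_coherent() above always ORs GFP_DMA into the
 * flags before calling the platform allocator.
 */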

static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *caddr, size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, virt_to_page(caddr),
			     (unsigned long)caddr & ~PAGE_MASK, size,
			     dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_page(dev, daddr, size, dir, attrs);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
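
/*
 * Sketch of typical streaming use (hypothetical driver code, not part
 * of this header): map a kmalloc'ed buffer around a single transfer
 * and always check the result with dma_mapping_error(), defined
 * further below.
 *
 *	dma_addr_t busaddr = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(my_dev, busaddr))
 *		return -EIO;
 *	... start the transfer ...
 *	dma_unmap_single(my_dev, busaddr, len, DMA_TO_DEVICE);
 */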

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_sg(dev, sgl, nents, dir, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sgl, int nents,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_sg(dev, sgl, nents, dir, attrs);
}

#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
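
/*
 * Hypothetical scatter/gather sketch: dma_map_sg() may coalesce
 * entries, so a driver must iterate over its return value, not over
 * the original nents (program_hw() here stands in for device-specific
 * setup).
 *
 *	int count = dma_map_sg(my_dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *	dma_unmap_sg(my_dev, sgl, nents, DMA_FROM_DEVICE);
 */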

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_cpu(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sgl,
				       int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t daddr,
					      size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_device(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sgl,
					  int nents,
					  enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_device(dev, sgl, nents, dir);
}
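
/*
 * Ownership rule, illustrated (hypothetical snippet): between map and
 * unmap the device owns the buffer; the CPU may only touch it between
 * a sync_for_cpu and the matching sync_for_device.
 *
 *	dma_sync_single_for_cpu(my_dev, busaddr, len, DMA_FROM_DEVICE);
 *	examine(buf);		// CPU may read the data now
 *	dma_sync_single_for_device(my_dev, busaddr, len, DMA_FROM_DEVICE);
 */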

static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->mapping_error(dev, daddr);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, page, offset, size, dir, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, addr, size, dir);
}

/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */

#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)
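
/*
 * Note (added commentary): the _range variants above discard @offset;
 * they sync @size bytes starting at the base @dma_handle rather than
 * at @dma_handle + @offset.
 */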

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported(dev, mask);
}

static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
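
/*
 * Typical probe-time use (hypothetical): restrict a device that can
 * only address 32 bits.  DMA_BIT_MASK() comes from
 * <linux/dma-mapping.h>.
 *
 *	if (dma_set_mask(my_dev, DMA_BIT_MASK(32)))
 *		dev_warn(my_dev, "no suitable DMA mask available\n");
 */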

extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However,
	 * we do need to ensure that dma_cache_sync() enforces order,
	 * hence the mb().
	 */
	mb();
}

#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

#endif /* _ASM_IA64_DMA_MAPPING_H */