/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <asm/bug.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

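/*
 * Illustrative sketch only (the helper name and base address below are
 * hypothetical, not taken from this file, and assume archdata.dma_data is
 * a pointer-sized field): platform setup code could install a non-zero
 * offset for a device roughly like this:
 *
 *	static void example_setup_dma_offset(struct device *dev)
 *	{
 *		dev->archdata.dma_data = (void *)0x80000000UL;
 *	}
 *
 * get_dma_direct_offset() below then adds that value to every physical
 * address handed to the device.
 */
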
static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (likely(dev))
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET; /* FIXME: not sure whether this is correct */
}

/*
 * Microblaze caches are not coherent with DMA, so dma_direct_alloc_coherent()
 * below uses the uncached consistent_alloc()/consistent_free() helpers
 * instead of plain page allocations.
 */
#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
						sg->length, direction);
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/*
	 * No extra address translation is needed here: dma_address is
	 * already a physical address, so it can be handed straight to
	 * __dma_sync() without a phys_to_virt() conversion.
	 */
	__dma_sync(dma_address, size, direction);
}

static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * It's pointless to flush the cache as the memory segment
	 * is given to the CPU
	 */

	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region
	 */

	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

struct dma_map_ops dma_direct_ops = {
	.alloc_coherent		= dma_direct_alloc_coherent,
	.free_coherent		= dma_direct_free_coherent,
	.map_sg			= dma_direct_map_sg,
	.unmap_sg		= dma_direct_unmap_sg,
	.dma_supported		= dma_direct_dma_supported,
	.map_page		= dma_direct_map_page,
	.unmap_page		= dma_direct_unmap_page,
	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
	.sync_single_for_device	= dma_direct_sync_single_for_device,
	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
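
/*
 * Driver-side view, as a minimal sketch ("mydev" and the buffer size are
 * hypothetical, not defined in this file): once dma_direct_ops is installed
 * as a device's dma_map_ops, the generic DMA API routes into the functions
 * above. For example,
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(mydev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	...let the device DMA to/from "handle"...
 *	dma_free_coherent(mydev, PAGE_SIZE, buf, handle);
 *
 * ends up in dma_direct_alloc_coherent()/dma_direct_free_coherent(), while
 * streaming helpers such as dma_map_single()/dma_unmap_single() land in
 * dma_direct_map_page()/dma_direct_unmap_page().
 */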

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);