/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/lmb.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

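/*
 * Fetch the per-device offset set up by platform code; with no device
 * we fall back to PCI_DRAM_OFFSET.
 */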
unsigned long get_dma_direct_offset(struct device *dev)
{
	if (dev)
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET;
}

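/*
 * Coherent allocation has two paths: on cache-coherent hardware we hand
 * out zeroed pages directly, while CONFIG_NOT_COHERENT_CACHE platforms
 * defer to __dma_alloc_coherent(), which is expected to provide a
 * non-cacheable mapping.  In both cases the returned handle includes
 * the per-device offset.
 */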
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_direct_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

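/*
 * With a direct mapping there is no IOMMU to program: each entry's bus
 * address is just its physical address plus the device offset.  The
 * __dma_sync_page() call maintains the CPU cache for the buffer; on
 * cache-coherent platforms it should compile away to a no-op.
 */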
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

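/*
 * The direct mapping is permanent, so there is nothing to tear down
 * when a scatterlist is unmapped.
 */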
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

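/*
 * A device can use the direct mapping only if its DMA mask covers all
 * of the memory the kernel may hand it.  On ppc64 the mask must reach
 * the top of DRAM (lmb_end_of_DRAM() returns one past the last byte,
 * hence the "- 1"); 32-bit platforms currently always claim support.
 */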
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= (lmb_end_of_DRAM() - 1);
#else
	return 1;
#endif
}

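/*
 * Single-page mapping: physical address plus device offset again, with
 * the same cache maintenance as the scatterlist case.
 */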
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

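/* As with unmap_sg, unmapping a directly mapped page needs no work. */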
static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
}

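/*
 * The sync operations below exist only for platforms whose caches are
 * not DMA-coherent; everywhere else dma_direct_ops leaves these
 * callbacks unset.
 */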
#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single_range(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle + offset), size, direction);
}
#endif

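/*
 * The default set of DMA operations for directly mapped busses.  How a
 * device ends up with these ops is platform specific; the coherent
 * alloc/free routines above are left non-static, presumably so platform
 * code can also call them directly.
 */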
struct dma_map_ops dma_direct_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_range_for_cpu = dma_direct_sync_single_range,
	.sync_single_range_for_device = dma_direct_sync_single_range,
	.sync_sg_for_cpu = dma_direct_sync_sg,
	.sync_sg_for_device = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

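/*
 * Initialize the dma-debug facility with a preallocated pool of debug
 * entries (1 << 16 = 65536): mappings may be created in atomic context,
 * so the entries cannot be allocated on demand.  fs_initcall() runs
 * this before most drivers start mapping buffers.
 */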
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);