/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/lmb.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

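/*
 * A minimal sketch of how platform code could install such an offset;
 * my_plat_dma_dev_setup() and MY_PLAT_DMA_OFFSET are hypothetical
 * names, not part of this file:
 *
 *	static void my_plat_dma_dev_setup(struct pci_dev *pdev)
 *	{
 *		pdev->dev.archdata.dma_data = (void *)MY_PLAT_DMA_OFFSET;
 *	}
 *
 * get_dma_direct_offset() then returns that value for the device
 * instead of the PCI_DRAM_OFFSET default.
 */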
unsigned long get_dma_direct_offset(struct device *dev)
{
	if (dev)
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET;
}

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_direct_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

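/*
 * Drivers reach dma_direct_alloc_coherent()/dma_direct_free_coherent()
 * above through the generic DMA API, not directly. A typical
 * (hypothetical) driver sequence:
 *
 *	void *cpu_addr;
 *	dma_addr_t bus_addr;
 *
 *	cpu_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
 *				      &bus_addr, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	// hand bus_addr to the device, use cpu_addr from the CPU
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, cpu_addr, bus_addr);
 */
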
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

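/*
 * dma_direct_map_sg() above is reached through the generic dma_map_sg()
 * wrapper. A typical (hypothetical) caller walks the mapped list like
 * this, where write_hw_descriptor() stands in for whatever the device
 * expects:
 *
 *	int i, n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	struct scatterlist *sg;
 *
 *	for_each_sg(sgl, sg, n, i)
 *		write_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */
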
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
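	/*
	 * Nothing to undo: a direct mapping allocates no resources, and
	 * the cache was already synchronized in dma_direct_map_sg().
	 */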
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so that platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= (lmb_end_of_DRAM() - 1);
#else
	return 1;
#endif
}

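/*
 * Worked example for dma_direct_dma_supported() above: on a ppc64
 * machine with 8GB of RAM, lmb_end_of_DRAM() is 0x200000000, so a
 * device advertising a 32-bit mask (0xffffffff) fails the test and
 * cannot use the direct ops; it must go through an IOMMU instead.
 */
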
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

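/*
 * dma_direct_map_page() above backs both dma_map_page() and
 * dma_map_single() for devices using these ops, e.g. (sketch):
 *
 *	dma_addr_t h = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, h))
 *		return -EIO;
 */
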
static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
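	/*
	 * Nothing to undo for a direct mapping: there is no IOMMU
	 * state, and the cache sync was done at map time.
	 */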
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single_range(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle+offset), size, direction);
}
#endif

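/*
 * On non-coherent platforms the sync hooks above let a driver bounce a
 * streaming buffer between device and CPU ownership, e.g. (sketch):
 *
 *	dma_sync_single_range_for_cpu(dev, h, 0, len, DMA_FROM_DEVICE);
 *	// ... CPU reads the freshly DMA'd data ...
 *	dma_sync_single_range_for_device(dev, h, 0, len, DMA_FROM_DEVICE);
 */
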
struct dma_mapping_ops dma_direct_ops = {
	.alloc_coherent	= dma_direct_alloc_coherent,
	.free_coherent	= dma_direct_free_coherent,
	.map_sg		= dma_direct_map_sg,
	.unmap_sg	= dma_direct_unmap_sg,
	.dma_supported	= dma_direct_dma_supported,
	.map_page	= dma_direct_map_page,
	.unmap_page	= dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_range_for_cpu	= dma_direct_sync_single_range,
	.sync_single_range_for_device	= dma_direct_sync_single_range,
	.sync_sg_for_cpu		= dma_direct_sync_sg,
	.sync_sg_for_device		= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);
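
/*
 * Platform code typically attaches these ops during device setup, e.g.
 * (sketch): set_dma_ops(&pdev->dev, &dma_direct_ops); after which the
 * generic dma_*() wrappers dispatch to the functions in this file.
 */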