/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
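
/*
 * Example (hypothetical platform device "pdev"; the offset value is
 * chosen purely for illustration): board setup code would store the
 * offset before the device issues any DMA, roughly
 *
 *	pdev->dev.archdata.dma_data = (void *)0x80000000UL;
 *
 * get_dma_direct_offset() below then adds that offset to every bus
 * address handed out by the mapping callbacks.
 */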

static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
				size_t size, enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		/* write CPU-dirtied lines back so the device reads them */
		flush_dcache_range(paddr + offset, paddr + offset + size);
		break;
	case DMA_FROM_DEVICE:
		/* discard stale lines so the CPU sees the device's data */
		invalidate_dcache_range(paddr + offset, paddr + offset + size);
		break;
	default:
		BUG();
	}
}

static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (dev)
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET; /* FIXME: not sure whether this is correct */
}

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
}

void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
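
/*
 * Usage sketch (pdev, ring and ring_dma are illustrative names): a
 * driver allocating a descriptor ring through the generic DMA API
 * lands in the two helpers above:
 *
 *	ring = dma_alloc_coherent(&pdev->dev, size, &ring_dma, GFP_KERNEL);
 *	...
 *	dma_free_coherent(&pdev->dev, size, ring, ring_dma);
 *
 * ring_dma receives the bus address (physical address plus the
 * per-device offset) that can be programmed into the hardware.
 */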

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
				sg->length, direction);
	}

	return nents;
}
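
/*
 * Usage sketch (hypothetical driver code; program_hw_descriptor()
 * stands in for whatever the device actually needs): after building a
 * scatterlist, a driver maps it in one call and reads back the bus
 * addresses:
 *
 *	int n = dma_map_sg(&pdev->dev, sglist, nents, DMA_TO_DEVICE);
 *	for_each_sg(sglist, sg, n, i)
 *		program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 */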

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* nothing to undo for a direct mapping */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync_page(page_to_phys(page), offset, size, direction);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}
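
/*
 * Usage sketch (buf and len are illustrative): streaming mappings of
 * kernel buffers reach dma_direct_map_page() via dma_map_single():
 *
 *	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
 *					   DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 *
 * The DMA_TO_DEVICE direction makes the map path flush the CPU cache
 * before the device reads the buffer.
 */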

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/*
	 * No cache cleanup beyond the usual sync is necessary here:
	 * dma_address is already a physical address, so it can be
	 * handed straight to __dma_sync_page().
	 */
	__dma_sync_page(dma_address, 0, size, direction);
}

struct dma_map_ops dma_direct_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
};
EXPORT_SYMBOL(dma_direct_ops);
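
/*
 * The generic dma_map_single()/dma_map_sg() wrappers dispatch through
 * the struct dma_map_ops returned by get_dma_ops(dev); on this
 * architecture that is expected to resolve to &dma_direct_ops, roughly
 * (a sketch of the arch hook, not a definition from this file):
 *
 *	static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 *	{
 *		return &dma_direct_ops;
 *	}
 */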

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);