/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
	dev->dma_mem = NULL;	/* don't leave a dangling pointer behind */
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
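
/*
 * Example (illustrative sketch only): a platform driver that owns a small
 * block of device-local memory could declare it as a per-device coherent
 * pool roughly as below.  The bus address, size and function name are
 * made-up values for illustration.
 */
static int __maybe_unused example_declare_coherent_pool(struct device *dev)
{
	/* 64 KiB of device-local RAM, visible at bus address 0x30000000 */
	int rc = dma_declare_coherent_memory(dev, 0x30000000, 0x30000000,
					     0x10000, DMA_MEMORY_MAP);

	/*
	 * On success the granted access method is returned (DMA_MEMORY_MAP
	 * or DMA_MEMORY_IO); 0 means the declaration failed.
	 */
	return (rc == DMA_MEMORY_MAP) ? 0 : -ENXIO;
}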

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
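
/*
 * Example (illustrative sketch only): a driver that must keep a fixed window
 * of its declared pool away from the allocator, e.g. a firmware mailbox at a
 * known device address, could reserve it as below.  The function name and the
 * one-page size are made-up values.
 */
static int __maybe_unused example_reserve_mailbox(struct device *dev,
						  dma_addr_t mailbox_addr)
{
	/* Reserve one page at the given device address within the pool. */
	void *vaddr = dma_mark_declared_memory_occupied(dev, mailbox_addr,
							PAGE_SIZE);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* The driver would stash vaddr and access the window through it. */
	return 0;
}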

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	int pageno;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	memset(*ret, 0, size);

	return 1;

err:
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
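
/*
 * Example (illustrative sketch of the calling pattern described above, not a
 * real arch implementation): a per-arch dma_alloc_coherent() would try the
 * per-device pool first and fall back to its generic allocator only when
 * dma_alloc_from_coherent() returns 0.  The function name is hypothetical.
 */
static void *__maybe_unused example_arch_dma_alloc_coherent(struct device *dev,
		size_t size, dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* Either satisfied or explicitly refused by the per-device pool. */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	/*
	 * No pool, or the pool is not exclusive: a real implementation would
	 * allocate from generic memory here, honouring the device's DMA mask.
	 */
	return NULL;
}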

/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
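
/*
 * Example (illustrative sketch, matching the allocation pattern above): a
 * per-arch dma_free_coherent() gives the per-device pool the first chance to
 * reclaim the buffer and only releases a generic allocation when
 * dma_release_from_coherent() returns 0.  The function name is hypothetical.
 */
static void __maybe_unused example_arch_dma_free_coherent(struct device *dev,
		size_t size, void *vaddr, dma_addr_t dma_handle)
{
	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	/*
	 * The buffer did not come from the device pool: a real implementation
	 * would free the generic allocation (e.g. free_pages()) here.
	 */
}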