/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory
 * etc.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitmap.h>
#include <linux/genalloc.h>

/* General purpose special memory pool descriptor. */
struct gen_pool {
	rwlock_t lock;			/* protects chunks list */
	struct list_head chunks;	/* list of chunks in this pool */
	unsigned order;			/* minimum allocation order */
};

/* General purpose special memory pool chunk descriptor. */
struct gen_pool_chunk {
	spinlock_t lock;		/* protects bits */
	struct list_head next_chunk;	/* next chunk in pool */
	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
	unsigned long start;		/* start of chunk, in units of 1 << pool->order */
	unsigned long size;		/* number of bits in the bitmap (chunk bytes >> pool->order) */
	unsigned long bits[0];		/* bitmap for allocating memory chunk */
};

/**
 * gen_pool_create() - create a new special memory pool
 * @order:	Log base 2 of the number of bytes each bitmap bit
 *		represents.
 * @nid:	Node id of the node the pool structure should be allocated
 *		on, or -1.  This will also be used for other allocations.
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *__must_check gen_pool_create(unsigned order, int nid)
{
	struct gen_pool *pool;

	if (WARN_ON(order >= BITS_PER_LONG))
		return NULL;

	pool = kmalloc_node(sizeof *pool, GFP_KERNEL, nid);
	if (pool) {
		rwlock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->order = order;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

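/*
 * Example (illustrative sketch only, not compiled as part of this file):
 * create a pool whose bitmap tracks 256-byte granules, letting the kernel
 * pick the node for the pool's own bookkeeping:
 *
 *	struct gen_pool *pool = gen_pool_create(8, -1);
 *	if (!pool)
 *		return -ENOMEM;
 *
 * Every allocation from this pool is rounded up to a multiple of
 * 1 << 8 bytes.
 */
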
/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int __must_check gen_pool_add_virt(struct gen_pool *pool, unsigned long virt,
				   phys_addr_t phys, size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	size_t nbytes;

	if (WARN_ON(!virt || virt + size < virt ||
	    (virt & ((1UL << pool->order) - 1))))
		return -EINVAL;

	size = size >> pool->order;
	if (WARN_ON(!size))
		return -EINVAL;

	nbytes = sizeof *chunk + BITS_TO_LONGS(size) * sizeof *chunk->bits;
	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
	if (!chunk)
		return -ENOMEM;

	spin_lock_init(&chunk->lock);
	chunk->phys_addr = phys;
	chunk->start = virt >> pool->order;
	chunk->size  = size;

	write_lock(&pool->lock);
	list_add(&chunk->next_chunk, &pool->chunks);
	write_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

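/*
 * Example (illustrative sketch; the SRAM addresses and mapping are
 * hypothetical): hand a 64 KiB block of on-chip memory to the pool created
 * above.  "vaddr" would come from however the platform maps that memory
 * (e.g. a static mapping or an ioremap() done elsewhere), and "sram_phys"
 * is its physical base.
 *
 *	ret = gen_pool_add_virt(pool, (unsigned long)vaddr, sram_phys,
 *				SZ_64K, -1);
 *	if (ret)
 *		return ret;
 */
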
/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool the memory was allocated from
 * @addr: virtual starting address of the memory, as returned by the pool
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;
	unsigned long start;

	read_lock(&pool->lock);
	list_for_each_entry(chunk, &pool->chunks, next_chunk) {
		/* chunk->start and chunk->size are in pool->order granules */
		start = chunk->start << pool->order;

		if (addr >= start &&
		    addr < start + (chunk->size << pool->order)) {
			paddr = chunk->phys_addr + (addr - start);
			break;
		}
	}
	read_unlock(&pool->lock);

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

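/*
 * Example (illustrative sketch): translate an address handed out by this
 * pool into the physical address a DMA engine would need.  "dma_dest" is a
 * hypothetical device register mapping.
 *
 *	unsigned long vaddr = gen_pool_alloc_aligned(pool, SZ_4K, 0);
 *	phys_addr_t paddr = gen_pool_virt_to_phys(pool, vaddr);
 *
 *	if (vaddr && paddr != (phys_addr_t)-1)
 *		writel(paddr, dma_dest);
 */
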
/**
 * gen_pool_destroy() - destroy a special memory pool
 * @pool:	Pool to destroy.
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	unsigned long bit;

	while (!list_empty(&pool->chunks)) {
		chunk = list_entry(pool->chunks.next, struct gen_pool_chunk,
				   next_chunk);
		list_del(&chunk->next_chunk);

		/* Any set bit means an allocation was never freed. */
		bit = find_next_bit(chunk->bits, chunk->size, 0);
		BUG_ON(bit < chunk->size);

		kfree(chunk);
	}
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

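/*
 * Example (illustrative sketch): tear-down order matters -- every
 * allocation must be returned with gen_pool_free() before the pool is
 * destroyed, otherwise the BUG_ON() above fires.
 *
 *	gen_pool_free(pool, vaddr, SZ_4K);
 *	gen_pool_destroy(pool);
 */
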
/**
 * gen_pool_alloc_aligned() - allocate special memory from the pool
 * @pool:	Pool to allocate from.
 * @size:	Number of bytes to allocate from the pool.
 * @alignment_order:	Order the allocated space should be
 *			aligned to (e.g. 20 means the allocated space
 *			must be aligned to 1MiB).
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.  Returns the address of the allocation,
 * or 0 if the request cannot be satisfied.
 */
unsigned long __must_check
gen_pool_alloc_aligned(struct gen_pool *pool, size_t size,
		       unsigned alignment_order)
{
	unsigned long addr, align_mask = 0, flags, start;
	struct gen_pool_chunk *chunk;

	if (size == 0)
		return 0;

	if (alignment_order > pool->order)
		align_mask = (1UL << (alignment_order - pool->order)) - 1;

	size = (size + (1UL << pool->order) - 1) >> pool->order;

	read_lock(&pool->lock);
	list_for_each_entry(chunk, &pool->chunks, next_chunk) {
		if (chunk->size < size)
			continue;

		spin_lock_irqsave(&chunk->lock, flags);
		start = bitmap_find_next_zero_area_off(chunk->bits, chunk->size,
						       0, size, align_mask,
						       chunk->start);
		if (start >= chunk->size) {
			spin_unlock_irqrestore(&chunk->lock, flags);
			continue;
		}

		bitmap_set(chunk->bits, start, size);
		spin_unlock_irqrestore(&chunk->lock, flags);
		addr = (chunk->start + start) << pool->order;
		goto done;
	}

	addr = 0;
done:
	read_unlock(&pool->lock);
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_aligned);

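/*
 * Example (illustrative sketch): carve out 4 KiB that is also 4 KiB
 * aligned (alignment_order of 12), regardless of the pool's minimum
 * allocation order.
 *
 *	unsigned long buf = gen_pool_alloc_aligned(pool, SZ_4K, 12);
 *	if (!buf)
 *		return -ENOMEM;
 */
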
/**
 * gen_pool_free() - free allocated special memory back to the pool
 * @pool:	Pool to free to.
 * @addr:	Starting address of memory to free back to pool.
 * @size:	Size in bytes of memory to free.
 *
 * Free previously allocated special memory back to the specified pool.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	unsigned long flags;

	if (!size)
		return;

	addr = addr >> pool->order;
	size = (size + (1UL << pool->order) - 1) >> pool->order;

	BUG_ON(addr + size < addr);

	read_lock(&pool->lock);
	list_for_each_entry(chunk, &pool->chunks, next_chunk)
		if (addr >= chunk->start &&
		    addr + size <= chunk->start + chunk->size) {
			spin_lock_irqsave(&chunk->lock, flags);
			bitmap_clear(chunk->bits, addr - chunk->start, size);
			spin_unlock_irqrestore(&chunk->lock, flags);
			goto done;
		}
	BUG_ON(1);
done:
	read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
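
/*
 * Example (illustrative sketch): the size passed to gen_pool_free() must
 * match the size of the original allocation; partial frees are not
 * supported by this allocator.
 *
 *	gen_pool_free(pool, buf, SZ_4K);
 */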