/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
12
13#include <asm/page.h>
14#include <linux/io.h>
15#include <linux/memory_alloc.h>
16#include <linux/mm.h>
17#include <linux/vmalloc.h>
18#include <linux/slab.h>
19#include <linux/module.h>
20#include <linux/err.h>
21#include <linux/log2.h>
22
/* Number of distinct memory pools supported; mem_type ids index mpools[]. */
#define MAX_MEMPOOLS 8

struct mem_pool mpools[MAX_MEMPOOLS];

/* The tree contains all allocations over all memory pools,
 * keyed by the alloc's vaddr and protected by alloc_mutex. */
static struct rb_root alloc_root;
static struct mutex alloc_mutex;
30
31static struct alloc *find_alloc(void *addr)
32{
33 struct rb_root *root = &alloc_root;
34 struct rb_node *p = root->rb_node;
35
36 mutex_lock(&alloc_mutex);
37
38 while (p) {
39 struct alloc *node;
40
41 node = rb_entry(p, struct alloc, rb_node);
42 if (addr < node->vaddr)
43 p = p->rb_left;
44 else if (addr > node->vaddr)
45 p = p->rb_right;
46 else {
47 mutex_unlock(&alloc_mutex);
48 return node;
49 }
50 }
51 mutex_unlock(&alloc_mutex);
52 return NULL;
53}
54
55static int add_alloc(struct alloc *node)
56{
57 struct rb_root *root = &alloc_root;
58 struct rb_node **p = &root->rb_node;
59 struct rb_node *parent = NULL;
60
61 mutex_lock(&alloc_mutex);
62 while (*p) {
63 struct alloc *tmp;
64 parent = *p;
65
66 tmp = rb_entry(parent, struct alloc, rb_node);
67
68 if (node->vaddr < tmp->vaddr)
69 p = &(*p)->rb_left;
70 else if (node->vaddr > tmp->vaddr)
71 p = &(*p)->rb_right;
72 else {
73 WARN(1, "memory at %p already allocated", tmp->vaddr);
74 mutex_unlock(&alloc_mutex);
75 return -EINVAL;
76 }
77 }
78 rb_link_node(&node->rb_node, parent, p);
79 rb_insert_color(&node->rb_node, root);
80 mutex_unlock(&alloc_mutex);
81 return 0;
82}
83
84static int remove_alloc(struct alloc *victim_node)
85{
86 struct rb_root *root = &alloc_root;
87 if (!victim_node)
88 return -EINVAL;
89
90 mutex_lock(&alloc_mutex);
91 rb_erase(&victim_node->rb_node, root);
92 mutex_unlock(&alloc_mutex);
93 return 0;
94}
95
96static struct gen_pool *initialize_gpool(unsigned long start,
97 unsigned long size)
98{
99 struct gen_pool *gpool;
100
101 gpool = gen_pool_create(PAGE_SHIFT, -1);
102
103 if (!gpool)
104 return NULL;
105 if (gen_pool_add(gpool, start, size, -1)) {
106 gen_pool_destroy(gpool);
107 return NULL;
108 }
109
110 return gpool;
111}
112
/*
 * Carve aligned_size bytes out of mpool's gen_pool, ioremap the range
 * (cached or uncached per 'cached'), and record the allocation in the
 * global rb-tree keyed by the returned virtual address.
 *
 * Returns the mapped kernel virtual address, or NULL on failure; every
 * failure path returns the physical carveout to the pool.
 *
 * NOTE(review): a gen_pool_alloc_aligned() return of 0 is treated as
 * failure, so a pool starting at physical address 0 could not hand out
 * its first page. Assumes align is a power of two (ilog2) -- confirm
 * against callers.
 */
static void *__alloc(struct mem_pool *mpool, unsigned long size,
	unsigned long align, int cached)
{
	unsigned long paddr;
	void __iomem *vaddr;

	unsigned long aligned_size;
	int log_align = ilog2(align);

	struct alloc *node;

	/* Round the request up to a whole number of pages. */
	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return NULL;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	if (cached)
		vaddr = ioremap_cached(paddr, aligned_size);
	else
		vaddr = ioremap(paddr, aligned_size);

	if (!vaddr)
		goto out_kfree;

	node->vaddr = vaddr;
	node->paddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	/* add_alloc() only fails on a duplicate vaddr (it WARNs inside). */
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;

	return vaddr;
out_kfree:
	/* vaddr is NULL when we arrive via the ioremap-failure branch. */
	if (vaddr)
		iounmap(vaddr);
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return NULL;
}
159
/*
 * Common teardown for both the mapped and the nomap allocation paths.
 * Looks up the tracking node by vaddr (for nomap allocations the
 * "vaddr" key is actually the physical address), optionally iounmaps,
 * returns the range to its gen_pool, and frees the tracking node.
 * Addresses that were never allocated here are silently ignored.
 *
 * NOTE(review): find_alloc() and remove_alloc() take alloc_mutex
 * separately, so two concurrent frees of the same address could both
 * find the node before either erases it -- confirm callers serialize.
 */
static void __free(void *vaddr, bool unmap)
{
	struct alloc *node = find_alloc(vaddr);

	if (!node)
		return;

	if (unmap)
		iounmap(node->vaddr);

	gen_pool_free(node->mpool->gpool, node->paddr, node->len);
	node->mpool->free += node->len;

	remove_alloc(node);
	kfree(node);
}
176
177static struct mem_pool *mem_type_to_memory_pool(int mem_type)
178{
179 struct mem_pool *mpool = &mpools[mem_type];
180
181 if (!mpool->size)
182 return NULL;
183
184 mutex_lock(&mpool->pool_mutex);
185 if (!mpool->gpool)
186 mpool->gpool = initialize_gpool(mpool->paddr, mpool->size);
187 mutex_unlock(&mpool->pool_mutex);
188 if (!mpool->gpool)
189 return NULL;
190
191 return mpool;
192}
193
194struct mem_pool *initialize_memory_pool(unsigned long start,
195 unsigned long size, int mem_type)
196{
197 int id = mem_type;
198
199 if (id >= MAX_MEMPOOLS || size <= PAGE_SIZE || size % PAGE_SIZE)
200 return NULL;
201
202 mutex_lock(&mpools[id].pool_mutex);
203
204 mpools[id].paddr = start;
205 mpools[id].size = size;
206 mpools[id].free = size;
207 mutex_unlock(&mpools[id].pool_mutex);
208
209 pr_info("memory pool %d (start %lx size %lx) initialized\n",
210 id, start, size);
211 return &mpools[id];
212}
213EXPORT_SYMBOL_GPL(initialize_memory_pool);
214
215void *allocate_contiguous_memory(unsigned long size,
216 int mem_type, unsigned long align, int cached)
217{
218 unsigned long aligned_size = PFN_ALIGN(size);
219 struct mem_pool *mpool;
220
221 mpool = mem_type_to_memory_pool(mem_type);
222 if (!mpool)
223 return NULL;
224 return __alloc(mpool, aligned_size, align, cached);
225
226}
227EXPORT_SYMBOL_GPL(allocate_contiguous_memory);
228
/*
 * Like allocate_contiguous_memory() but without creating a kernel
 * mapping: returns the physical address of the carveout, or 0 on
 * failure. The allocation is still tracked in the rb-tree so it can
 * later be released with free_contiguous_memory_by_paddr().
 *
 * Assumes align is a power of two (ilog2) -- confirm against callers.
 */
unsigned long allocate_contiguous_memory_nomap(unsigned long size,
	int mem_type, unsigned long align)
{
	unsigned long paddr;
	unsigned long aligned_size;

	struct alloc *node;
	struct mem_pool *mpool;
	int log_align = ilog2(align);

	mpool = mem_type_to_memory_pool(mem_type);
	if (!mpool || !mpool->gpool)
		return 0;

	/* Round the request up to a whole number of pages. */
	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return 0;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	node->paddr = paddr;

	/* We search the tree using node->vaddr, so set
	 * it to something unique even though we don't
	 * use it for physical allocation nodes.
	 * The virtual and physical address ranges
	 * are disjoint, so there won't be any chance of
	 * a duplicate node->vaddr value.
	 */
	node->vaddr = (void *)paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;
	return paddr;
out_kfree:
	kfree(node);
out:
	/* Return the physical carveout to the pool on any failure. */
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return 0;
}
EXPORT_SYMBOL_GPL(allocate_contiguous_memory_nomap);
276
277void free_contiguous_memory(void *addr)
278{
279 if (!addr)
280 return;
281 __free(addr, true);
282 return;
283}
284EXPORT_SYMBOL_GPL(free_contiguous_memory);
285
286void free_contiguous_memory_by_paddr(unsigned long paddr)
287{
288 if (!paddr)
289 return;
290 __free((void *)paddr, false);
291 return;
292}
293EXPORT_SYMBOL_GPL(free_contiguous_memory_by_paddr);
294
295unsigned long memory_pool_node_paddr(void *vaddr)
296{
297 struct alloc *node = find_alloc(vaddr);
298
299 if (!node)
300 return -EINVAL;
301
302 return node->paddr;
303}
304EXPORT_SYMBOL_GPL(memory_pool_node_paddr);
305
306unsigned long memory_pool_node_len(void *vaddr)
307{
308 struct alloc *node = find_alloc(vaddr);
309
310 if (!node)
311 return -EINVAL;
312
313 return node->len;
314}
315EXPORT_SYMBOL_GPL(memory_pool_node_len);
316
317int __init memory_pool_init(void)
318{
319 int i;
320
321 alloc_root = RB_ROOT;
322 mutex_init(&alloc_mutex);
323 for (i = 0; i < ARRAY_SIZE(mpools); i++) {
324 mutex_init(&mpools[i].pool_mutex);
325 mpools[i].gpool = NULL;
326 }
327 return 0;
328}