/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12
Steve Mucklef132c6c2012-06-06 18:30:57 -070013#include <linux/module.h>
Laura Abbottd01221b2012-05-16 17:52:49 -070014#include <linux/init.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070015#include <linux/iommu.h>
Laura Abbottd01221b2012-05-16 17:52:49 -070016#include <linux/memory_alloc.h>
Laura Abbott0577d7b2012-04-17 11:14:30 -070017#include <linux/platform_device.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070018#include <linux/vmalloc.h>
Laura Abbottd01221b2012-05-16 17:52:49 -070019#include <linux/rbtree.h>
20#include <linux/slab.h>
#include <asm/sizes.h>
#include <asm/page.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/socinfo.h>
#include <mach/msm_subsystem_map.h>

/*
 * Dummy backing memory for overmapping: sized so that a 64K-aligned
 * 64K window always fits inside (see msm_iommu_map_extra()).
 */
char iommu_dummy[2*SZ_64K-4];

struct msm_iova_data {
	struct rb_node node;
	struct mem_pool *pools;
	int npools;
	struct iommu_domain *domain;
	int domain_num;
};

static struct rb_root domain_root;
DEFINE_MUTEX(domain_mutex);
static atomic_t domain_nums = ATOMIC_INIT(-1);

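/*
 * msm_iommu_map_extra() - back an unused IOVA range with the dummy buffer.
 *
 * Maps @size bytes (rounded up to @page_size) starting at @start_iova so
 * that every page of the range points at the same physical window inside
 * iommu_dummy. On failure, every mapping made so far is torn down and
 * -EAGAIN is returned.
 */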
int msm_iommu_map_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				unsigned long size,
				unsigned long page_size,
				int cached)
{
	int i, ret_value = 0;
	unsigned long order = get_order(page_size);
	unsigned long aligned_size = ALIGN(size, page_size);
	unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
	unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
	unsigned long temp_iova = start_iova;

	for (i = 0; i < nrpages; i++) {
		int ret = iommu_map(domain, temp_iova, phy_addr, page_size,
				    cached);
		if (ret) {
			pr_err("%s: could not map %lx in domain %p, error: %d\n",
				__func__, temp_iova, domain, ret);
			ret_value = -EAGAIN;
			goto out;
		}
		temp_iova += page_size;
	}
	return ret_value;
out:
	/* Unwind: unmap everything mapped so far, last page first. */
	for (; i > 0; --i) {
		temp_iova -= page_size;
		iommu_unmap(domain, temp_iova, page_size);
	}
	return ret_value;
}

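/*
 * msm_iommu_unmap_extra() - tear down a range created by
 * msm_iommu_map_extra(). @size and @page_size must match the values
 * used when the range was mapped.
 */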
void msm_iommu_unmap_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				unsigned long size,
				unsigned long page_size)
{
	int i;
	unsigned long order = get_order(page_size);
	unsigned long aligned_size = ALIGN(size, page_size);
	unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
	unsigned long temp_iova = start_iova;

	for (i = 0; i < nrpages; ++i) {
		iommu_unmap(domain, temp_iova, page_size);
		temp_iova += page_size;
	}
}

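/*
 * Map a physically contiguous range by wrapping it in a single-entry
 * scatterlist and handing it to iommu_map_range(). The scatterlist is
 * only needed for the duration of the call, so it is freed before
 * returning.
 */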
static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
				unsigned long iova,
				unsigned long phys,
				unsigned long size,
				int cached)
{
	int ret;
	struct scatterlist *sglist;

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto err1;
	}

	sg_init_table(sglist, 1);
	sglist->length = size;
	sglist->offset = 0;
	sglist->dma_address = phys;

	ret = iommu_map_range(domain, iova, sglist, size, cached);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, iova, domain);
	}

	vfree(sglist);
err1:
	return ret;
}

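/*
 * msm_iommu_map_contig_buffer() - allocate an IOVA from the given
 * domain/partition and map @size bytes of physically contiguous memory
 * at it. @size must be aligned to @align (a power of two). On success
 * the chosen IOVA is returned through @iova_val; on failure the IOVA
 * is released again.
 *
 * A minimal usage sketch (the domain/partition numbers and sizes here
 * are hypothetical):
 *
 *	unsigned long iova;
 *	int ret = msm_iommu_map_contig_buffer(phys, domain_no, 0,
 *					      SZ_64K, SZ_4K, 0, &iova);
 *	if (!ret)
 *		;	// the device may now use iova
 */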
int msm_iommu_map_contig_buffer(unsigned long phys,
				unsigned int domain_no,
				unsigned int partition_no,
				unsigned long size,
				unsigned long align,
				unsigned long cached,
				unsigned long *iova_val)
{
	unsigned long iova;
	int ret;

	if (size & (align - 1))
		return -EINVAL;

	ret = msm_allocate_iova_address(domain_no, partition_no, size, align,
					&iova);

	if (ret)
		return -ENOMEM;

	ret = msm_iommu_map_iova_phys(msm_get_iommu_domain(domain_no), iova,
					phys, size, cached);

	if (ret)
		msm_free_iova_address(iova, domain_no, partition_no, size);
	else
		*iova_val = iova;

	return ret;
}

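/*
 * msm_iommu_unmap_contig_buffer() - inverse of
 * msm_iommu_map_contig_buffer(): unmap the range and return the IOVA
 * to its partition's pool.
 */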
void msm_iommu_unmap_contig_buffer(unsigned long iova,
				unsigned int domain_no,
				unsigned int partition_no,
				unsigned long size)
{
	iommu_unmap_range(msm_get_iommu_domain(domain_no), iova, size);
	msm_free_iova_address(iova, domain_no, partition_no, size);
}

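/*
 * Look up a registered domain by number in the rb-tree. Takes
 * domain_mutex internally; returns NULL if the domain was never
 * registered.
 */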
static struct msm_iova_data *find_domain(int domain_num)
{
	struct rb_root *root = &domain_root;
	struct rb_node *p = root->rb_node;

	mutex_lock(&domain_mutex);

	while (p) {
		struct msm_iova_data *node;

		node = rb_entry(p, struct msm_iova_data, node);
		if (domain_num < node->domain_num)
			p = p->rb_left;
		else if (domain_num > node->domain_num)
			p = p->rb_right;
		else {
			mutex_unlock(&domain_mutex);
			return node;
		}
	}
	mutex_unlock(&domain_mutex);
	return NULL;
}

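/*
 * Insert a new domain into the rb-tree, keyed by domain_num. Numbers
 * come from an atomic counter, so hitting an existing key indicates a
 * programming error (hence the BUG()).
 */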
static int add_domain(struct msm_iova_data *node)
{
	struct rb_root *root = &domain_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	mutex_lock(&domain_mutex);
	while (*p) {
		struct msm_iova_data *tmp;
		parent = *p;

		tmp = rb_entry(parent, struct msm_iova_data, node);

		if (node->domain_num < tmp->domain_num)
			p = &(*p)->rb_left;
		else if (node->domain_num > tmp->domain_num)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, root);
	mutex_unlock(&domain_mutex);
	return 0;
}

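/*
 * Return the iommu_domain registered under @domain_num, or NULL if no
 * such domain exists.
 */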
struct iommu_domain *msm_get_iommu_domain(int domain_num)
{
	struct msm_iova_data *data;

	data = find_domain(domain_num);

	if (data)
		return data->domain;
	else
		return NULL;
}

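/*
 * msm_allocate_iova_address() - carve @size bytes out of a partition's
 * genpool, aligned to @align (a power of two). Returns 0 and stores
 * the address in @iova on success, -EINVAL for a bad domain/partition,
 * and -ENOMEM when the pool is exhausted.
 */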
int msm_allocate_iova_address(unsigned int iommu_domain,
					unsigned int partition_no,
					unsigned long size,
					unsigned long align,
					unsigned long *iova)
{
	struct msm_iova_data *data;
	struct mem_pool *pool;
	unsigned long va;

	data = find_domain(iommu_domain);

	if (!data)
		return -EINVAL;

	if (partition_no >= data->npools)
		return -EINVAL;

	pool = &data->pools[partition_no];

	if (!pool->gpool)
		return -EINVAL;

	va = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
	if (va) {
		pool->free -= size;
		/* Offset because genpool can't handle 0 addresses */
		if (pool->paddr == 0)
			va -= SZ_4K;
		*iova = va;
		return 0;
	}

	return -ENOMEM;
}

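/*
 * msm_free_iova_address() - return a range obtained from
 * msm_allocate_iova_address() to its pool. @size must match the
 * original allocation.
 */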
void msm_free_iova_address(unsigned long iova,
			unsigned int iommu_domain,
			unsigned int partition_no,
			unsigned long size)
{
	struct msm_iova_data *data;
	struct mem_pool *pool;

	data = find_domain(iommu_domain);

	if (!data) {
		WARN(1, "Invalid domain %d\n", iommu_domain);
		return;
	}

	if (partition_no >= data->npools) {
		WARN(1, "Invalid partition %d for domain %d\n",
			partition_no, iommu_domain);
		return;
	}

	pool = &data->pools[partition_no];

	if (!pool->gpool)
		return;

	pool->free += size;

	/* Offset because genpool can't handle 0 addresses */
	if (pool->paddr == 0)
		iova += SZ_4K;

	gen_pool_free(pool->gpool, iova, size);
}

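/*
 * msm_register_domain() - create a new IOMMU domain from @layout, with
 * one genpool per non-empty partition. Returns the new domain number
 * (>= 0) on success or a negative errno. There is currently no
 * corresponding unregister path, so domains live for the life of the
 * system.
 */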
int msm_register_domain(struct msm_iova_layout *layout)
{
	int i;
	struct msm_iova_data *data;
	struct mem_pool *pools;

	if (!layout)
		return -EINVAL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* Zeroed so that partitions skipped below keep a NULL gpool. */
	pools = kzalloc(sizeof(struct mem_pool) * layout->npartitions,
			GFP_KERNEL);

	if (!pools)
		goto out;

	for (i = 0; i < layout->npartitions; i++) {
		if (layout->partitions[i].size == 0)
			continue;

		pools[i].gpool = gen_pool_create(PAGE_SHIFT, -1);

		if (!pools[i].gpool)
			continue;

		pools[i].paddr = layout->partitions[i].start;
		pools[i].size = layout->partitions[i].size;
		pools[i].free = layout->partitions[i].size;

		/*
		 * genalloc can't handle a pool starting at address 0.
		 * For now, solve this problem by offsetting the value
		 * put in by 4k.
		 * gen pool address = actual address + 4k
		 */
		if (pools[i].paddr == 0)
			layout->partitions[i].start += SZ_4K;

		if (gen_pool_add(pools[i].gpool,
			layout->partitions[i].start,
			layout->partitions[i].size, -1)) {
			gen_pool_destroy(pools[i].gpool);
			pools[i].gpool = NULL;
			continue;
		}
	}

	data->pools = pools;
	data->npools = layout->npartitions;
	data->domain_num = atomic_inc_return(&domain_nums);
	data->domain = iommu_domain_alloc(&platform_bus_type,
					  layout->domain_flags);

	if (!data->domain) {
		for (i = 0; i < layout->npartitions; i++)
			if (pools[i].gpool)
				gen_pool_destroy(pools[i].gpool);
		kfree(pools);
		goto out;
	}

	add_domain(data);

	return data->domain_num;

out:
	kfree(data);

	return -ENOMEM;
}

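/*
 * msm_use_iommu() - nonzero if an IOMMU is present on the platform
 * bus, i.e. the mapping APIs above can be used.
 */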
int msm_use_iommu(void)
{
	return iommu_present(&platform_bus_type);
}

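/*
 * Probe: walk the platform data, register an IOVA layout for each
 * described domain, then attach every named context device to its
 * domain. Failures for individual domains/contexts are logged and
 * skipped rather than failing the whole probe.
 */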
static int __init iommu_domain_probe(struct platform_device *pdev)
{
	struct iommu_domains_pdata *p = pdev->dev.platform_data;
	int i, j;

	if (!p)
		return -ENODEV;

	for (i = 0; i < p->ndomains; i++) {
		/* Zero-initialized so domain_flags is not stack garbage. */
		struct msm_iova_layout l = { 0 };
		struct msm_iova_partition *part;
		struct msm_iommu_domain *domains;

		domains = p->domains;
		l.npartitions = domains[i].npools;
		part = kmalloc(
			sizeof(struct msm_iova_partition) * l.npartitions,
			GFP_KERNEL);

		if (!part) {
			pr_err("%s: could not allocate space for domain %d\n",
				__func__, i);
			continue;
		}

		for (j = 0; j < l.npartitions; j++) {
			part[j].start = p->domains[i].iova_pools[j].paddr;
			part[j].size = p->domains[i].iova_pools[j].size;
		}

		l.partitions = part;

		msm_register_domain(&l);

		kfree(part);
	}

	for (i = 0; i < p->nnames; i++) {
		struct device *ctx = msm_iommu_get_ctx(
						p->domain_names[i].name);
		struct iommu_domain *domain;

		if (!ctx)
			continue;

		domain = msm_get_iommu_domain(p->domain_names[i].domain);

		if (!domain)
			continue;

		if (iommu_attach_device(domain, ctx)) {
			WARN(1, "%s: could not attach domain %p to context %s."
				" iommu programming will not occur.\n",
				__func__, domain,
				p->domain_names[i].name);
			continue;
		}
	}

	return 0;
}

static struct platform_driver iommu_domain_driver = {
	.driver = {
		.name = "iommu_domains",
		.owner = THIS_MODULE
	},
};

static int __init msm_subsystem_iommu_init(void)
{
	return platform_driver_probe(&iommu_domain_driver, iommu_domain_probe);
}
device_initcall(msm_subsystem_iommu_init);