/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/memory_alloc.h>
#include <linux/platform_device.h>
#include <linux/rbtree.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <asm/sizes.h>
#include <asm/page.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/socinfo.h>
#include <mach/msm_subsystem_map.h>

/*
 * Dummy backing for "extra" overmappings.  Sized at 2 * SZ_64K - 4 so
 * that a 64K-aligned, 64K-sized window always fits inside it, whatever
 * alignment the array itself ends up with.
 */
char iommu_dummy[2*SZ_64K-4];

struct msm_iova_data {
	struct rb_node node;
	struct mem_pool *pools;
	int npools;
	struct iommu_domain *domain;
	int domain_num;
};

static struct rb_root domain_root;
static DEFINE_MUTEX(domain_mutex);
static atomic_t domain_nums = ATOMIC_INIT(-1);

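/**
 * msm_iommu_map_extra() - back an iova range with the dummy page
 * @domain:	domain to map into
 * @start_iova:	first iova of the range
 * @size:	size of the range in bytes
 * @page_size:	mapping granularity; SZ_4K maps via a scatterlist of
 *		dummy pages, larger sizes map the aligned dummy region
 *		one block at a time
 * @cached:	protection flags passed through to the mapping call
 *
 * Points every page of [start_iova, start_iova + size) at iommu_dummy
 * so that stray device accesses land in known memory.  Returns 0 on
 * success or a negative errno; partially completed large-page mappings
 * are torn down before returning an error.
 */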
int msm_iommu_map_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				unsigned long size,
				unsigned long page_size,
				int cached)
{
	int ret = 0;
	int i = 0;
	unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
	unsigned long temp_iova = start_iova;

	if (page_size == SZ_4K) {
		struct scatterlist *sglist;
		unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
		struct page *dummy_page = phys_to_page(phy_addr);

		sglist = kmalloc(sizeof(*sglist) * nrpages, GFP_KERNEL);
		if (!sglist) {
			ret = -ENOMEM;
			goto out;
		}

		sg_init_table(sglist, nrpages);

		/* every entry points at the same dummy backing page */
		for (i = 0; i < nrpages; i++)
			sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);

		ret = iommu_map_range(domain, temp_iova, sglist, size, cached);
		if (ret) {
			pr_err("%s: could not map extra %lx in domain %p\n",
				__func__, start_iova, domain);
		}

		kfree(sglist);
	} else {
		unsigned long order = get_order(page_size);
		unsigned long aligned_size = ALIGN(size, page_size);
		unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);

		for (i = 0; i < nrpages; i++) {
			ret = iommu_map(domain, temp_iova, phy_addr, page_size,
					cached);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, start_iova, domain, ret);
				ret = -EAGAIN;
				goto out;
			}
			temp_iova += page_size;
		}
	}
	return ret;
out:
	/* unwind any blocks already mapped before the failure */
	for (; i > 0; --i) {
		temp_iova -= page_size;
		iommu_unmap(domain, temp_iova, page_size);
	}
	return ret;
}

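/*
 * Tear down a range previously created by msm_iommu_map_extra().  Must
 * be called with the same size and page_size used when mapping.
 */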
void msm_iommu_unmap_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				unsigned long size,
				unsigned long page_size)
{
	int i;
	unsigned long order = get_order(page_size);
	unsigned long aligned_size = ALIGN(size, page_size);
	unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
	unsigned long temp_iova = start_iova;

	for (i = 0; i < nrpages; ++i) {
		iommu_unmap(domain, temp_iova, page_size);
		temp_iova += page_size;
	}
}

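/*
 * Map a physically contiguous buffer by wrapping it in a single-entry
 * scatterlist.  Only dma_address, length and offset are filled in,
 * which is what this tree's iommu_map_range() consumes.
 */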
static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
				unsigned long iova,
				unsigned long phys,
				unsigned long size,
				int cached)
{
	int ret;
	struct scatterlist *sglist;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= cached ? IOMMU_CACHE : 0;

	sglist = kmalloc(sizeof(*sglist), GFP_KERNEL);
	if (!sglist) {
		ret = -ENOMEM;
		goto err1;
	}

	sg_init_table(sglist, 1);
	sglist->length = size;
	sglist->offset = 0;
	sglist->dma_address = phys;

	ret = iommu_map_range(domain, iova, sglist, size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, iova, domain);
	}

	kfree(sglist);
err1:
	return ret;
}

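/**
 * msm_iommu_map_contig_buffer() - allocate an iova range from the given
 * domain/partition and map @phys at it
 *
 * @size must be a multiple of @align.  On success the allocated iova is
 * returned through @iova_val; on failure the iova allocation is undone.
 */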
int msm_iommu_map_contig_buffer(unsigned long phys,
				unsigned int domain_no,
				unsigned int partition_no,
				unsigned long size,
				unsigned long align,
				unsigned long cached,
				unsigned long *iova_val)
{
	unsigned long iova;
	int ret;

	if (size & (align - 1))
		return -EINVAL;

	ret = msm_allocate_iova_address(domain_no, partition_no, size, align,
					&iova);
	if (ret)
		return -ENOMEM;

	ret = msm_iommu_map_iova_phys(msm_get_iommu_domain(domain_no), iova,
					phys, size, cached);
	if (ret)
		msm_free_iova_address(iova, domain_no, partition_no, size);
	else
		*iova_val = iova;

	return ret;
}

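/* Inverse of msm_iommu_map_contig_buffer(): unmap, then free the iova. */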
void msm_iommu_unmap_contig_buffer(unsigned long iova,
				unsigned int domain_no,
				unsigned int partition_no,
				unsigned long size)
{
	iommu_unmap_range(msm_get_iommu_domain(domain_no), iova, size);
	msm_free_iova_address(iova, domain_no, partition_no, size);
}

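/*
 * Example usage (a sketch only; the physical address, domain and
 * partition numbers are hypothetical placeholders that normally come
 * from platform data):
 *
 *	unsigned long iova;
 *	int ret;
 *
 *	ret = msm_iommu_map_contig_buffer(phys, domain_no, partition_no,
 *					  SZ_64K, SZ_4K, 0, &iova);
 *	if (!ret) {
 *		... device accesses through iova ...
 *		msm_iommu_unmap_contig_buffer(iova, domain_no,
 *					      partition_no, SZ_64K);
 *	}
 */

/*
 * Registered domains are kept in an rbtree keyed by domain_num and
 * protected by domain_mutex.
 */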
static struct msm_iova_data *find_domain(int domain_num)
{
	struct rb_root *root = &domain_root;
	struct rb_node *p = root->rb_node;

	mutex_lock(&domain_mutex);

	while (p) {
		struct msm_iova_data *node;

		node = rb_entry(p, struct msm_iova_data, node);
		if (domain_num < node->domain_num)
			p = p->rb_left;
		else if (domain_num > node->domain_num)
			p = p->rb_right;
		else {
			mutex_unlock(&domain_mutex);
			return node;
		}
	}
	mutex_unlock(&domain_mutex);
	return NULL;
}

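/* Insert @node into the domain rbtree; duplicate domain numbers BUG(). */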
static int add_domain(struct msm_iova_data *node)
{
	struct rb_root *root = &domain_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	mutex_lock(&domain_mutex);
	while (*p) {
		struct msm_iova_data *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct msm_iova_data, node);

		if (node->domain_num < tmp->domain_num)
			p = &(*p)->rb_left;
		else if (node->domain_num > tmp->domain_num)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, root);
	mutex_unlock(&domain_mutex);
	return 0;
}

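/* Return the iommu_domain for a registered domain number, or NULL. */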
struct iommu_domain *msm_get_iommu_domain(int domain_num)
{
	struct msm_iova_data *data;

	data = find_domain(domain_num);

	if (data)
		return data->domain;
	else
		return NULL;
}

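/*
 * Allocate an aligned iova range from the partition's genpool.  Pools
 * that start at physical address 0 are stored offset by SZ_4K because
 * genalloc treats an address of 0 as allocation failure, so the offset
 * is subtracted again before the iova is handed back.
 */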
int msm_allocate_iova_address(unsigned int iommu_domain,
					unsigned int partition_no,
					unsigned long size,
					unsigned long align,
					unsigned long *iova)
{
	struct msm_iova_data *data;
	struct mem_pool *pool;
	unsigned long va;

	data = find_domain(iommu_domain);

	if (!data)
		return -EINVAL;

	if (partition_no >= data->npools)
		return -EINVAL;

	pool = &data->pools[partition_no];

	if (!pool->gpool)
		return -EINVAL;

	va = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
	if (va) {
		pool->free -= size;
		/* Offset because genpool can't handle 0 addresses */
		if (pool->paddr == 0)
			va -= SZ_4K;
		*iova = va;
		return 0;
	}

	return -ENOMEM;
}

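/*
 * Return an iova range to its partition, re-applying the SZ_4K offset
 * used for pools that start at address 0.
 */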
void msm_free_iova_address(unsigned long iova,
				unsigned int iommu_domain,
				unsigned int partition_no,
				unsigned long size)
{
	struct msm_iova_data *data;
	struct mem_pool *pool;

	data = find_domain(iommu_domain);

	if (!data) {
		WARN(1, "Invalid domain %d\n", iommu_domain);
		return;
	}

	if (partition_no >= data->npools) {
		WARN(1, "Invalid partition %d for domain %d\n",
			partition_no, iommu_domain);
		return;
	}

	pool = &data->pools[partition_no];

	if (!pool)
		return;

	pool->free += size;

	/* Offset because genpool can't handle 0 addresses */
	if (pool->paddr == 0)
		iova += SZ_4K;

	gen_pool_free(pool->gpool, iova, size);
}

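/*
 * msm_register_domain() - build genpools for each partition in @layout,
 * allocate an iommu domain for it and add it to the registry.  Returns
 * the new domain number (>= 0) or a negative errno.
 *
 * A minimal registration sketch (the partition values here are
 * hypothetical, not taken from any real platform):
 *
 *	struct msm_iova_partition part = {
 *		.start = SZ_4K,
 *		.size  = SZ_256M,
 *	};
 *	struct msm_iova_layout layout = {
 *		.partitions   = &part,
 *		.npartitions  = 1,
 *		.domain_flags = 0,
 *	};
 *	int domain_num = msm_register_domain(&layout);
 */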
int msm_register_domain(struct msm_iova_layout *layout)
{
	int i;
	struct msm_iova_data *data;
	struct mem_pool *pools;

	if (!layout)
		return -EINVAL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	pools = kmalloc(sizeof(struct mem_pool) * layout->npartitions,
			GFP_KERNEL);

	if (!pools)
		goto out;

	for (i = 0; i < layout->npartitions; i++) {
		if (layout->partitions[i].size == 0)
			continue;

		pools[i].gpool = gen_pool_create(PAGE_SHIFT, -1);

		if (!pools[i].gpool)
			continue;

		pools[i].paddr = layout->partitions[i].start;
		pools[i].size = layout->partitions[i].size;
		/* kmalloc'd memory is not zeroed; the pool starts fully free */
		pools[i].free = pools[i].size;

		/*
		 * genalloc can't handle a pool starting at address 0.
		 * For now, solve this problem by offsetting the value
		 * put in by 4k.
		 * gen pool address = actual address + 4k
		 */
		if (pools[i].paddr == 0)
			layout->partitions[i].start += SZ_4K;

		if (gen_pool_add(pools[i].gpool,
			layout->partitions[i].start,
			layout->partitions[i].size, -1)) {
			gen_pool_destroy(pools[i].gpool);
			pools[i].gpool = NULL;
			continue;
		}
	}

	data->pools = pools;
	data->npools = layout->npartitions;
	data->domain_num = atomic_inc_return(&domain_nums);
	data->domain = iommu_domain_alloc(&platform_bus_type,
					  layout->domain_flags);

	add_domain(data);

	return data->domain_num;

out:
	kfree(data);

	return -ENOMEM;
}

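/* Nonzero when an IOMMU is present on the platform bus. */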
int msm_use_iommu(void)
{
	return iommu_present(&platform_bus_type);
}

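/*
 * Turn the "iommu_domains" platform data into registered domains, then
 * attach each named IOMMU context to its domain.
 */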
static int __init iommu_domain_probe(struct platform_device *pdev)
{
	struct iommu_domains_pdata *p = pdev->dev.platform_data;
	int i, j;

	if (!p)
		return -ENODEV;

	for (i = 0; i < p->ndomains; i++) {
		/*
		 * Zero-init so fields not set below (e.g. domain_flags)
		 * are well defined when passed to msm_register_domain().
		 */
		struct msm_iova_layout l = {0};
		struct msm_iova_partition *part;
		struct msm_iommu_domain *domains;

		domains = p->domains;
		l.npartitions = domains[i].npools;
		part = kmalloc(
			sizeof(struct msm_iova_partition) * l.npartitions,
			GFP_KERNEL);

		if (!part) {
			pr_info("%s: could not allocate space for domain %d\n",
				__func__, i);
			continue;
		}

		for (j = 0; j < l.npartitions; j++) {
			part[j].start = p->domains[i].iova_pools[j].paddr;
			part[j].size = p->domains[i].iova_pools[j].size;
		}

		l.partitions = part;

		msm_register_domain(&l);

		kfree(part);
	}

	for (i = 0; i < p->nnames; i++) {
		struct device *ctx = msm_iommu_get_ctx(
						p->domain_names[i].name);
		struct iommu_domain *domain;

		if (!ctx)
			continue;

		domain = msm_get_iommu_domain(p->domain_names[i].domain);

		if (!domain)
			continue;

		if (iommu_attach_device(domain, ctx)) {
			WARN(1, "%s: could not attach domain %p to context %s."
				" iommu programming will not occur.\n",
				__func__, domain,
				p->domain_names[i].name);
			continue;
		}
	}

	return 0;
}

static struct platform_driver iommu_domain_driver = {
	.driver = {
		.name = "iommu_domains",
		.owner = THIS_MODULE
	},
};

static int __init msm_subsystem_iommu_init(void)
{
	return platform_driver_probe(&iommu_domain_driver, iommu_domain_probe);
}
device_initcall(msm_subsystem_iommu_init);