/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/memory_alloc.h>
#include <linux/platform_device.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <asm/sizes.h>
#include <asm/page.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/socinfo.h>
#include <mach/msm_subsystem_map.h>

/*
 * Dummy buffer for overmapping: sized at 2 * 64K - 4 bytes so that a
 * 64K-aligned, 64K-sized window always fits inside it no matter where
 * the linker places the array.
 */
char iommu_dummy[2*SZ_64K-4];

struct msm_iova_data {
	struct rb_node node;
	struct mem_pool *pools;
	int npools;
	struct iommu_domain *domain;
	int domain_num;
};

static struct rb_root domain_root;
DEFINE_MUTEX(domain_mutex);
static atomic_t domain_nums = ATOMIC_INIT(-1);

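/*
 * Nonzero when an IOMMU is present on the platform bus; callers use
 * this to fall back to physical addressing when there is none.
 */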
int msm_use_iommu(void)
{
	return iommu_present(&platform_bus_type);
}

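/*
 * Back the range [start_iova, start_iova + size) with the dummy
 * buffer so that stray device accesses decode without faulting.  The
 * 4K case maps a scatterlist that repeats one dummy page in a single
 * iommu_map_range() call; larger page sizes map the aligned dummy
 * buffer once per page and unwind on failure.
 */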
int msm_iommu_map_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				unsigned long size,
				unsigned long page_size,
				int cached)
{
	int ret = 0;
	int i = 0;
	unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
	unsigned long temp_iova = start_iova;

	if (page_size == SZ_4K) {
		struct scatterlist *sglist;
		unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
		struct page *dummy_page = phys_to_page(phy_addr);

		sglist = kmalloc(sizeof(*sglist) * nrpages, GFP_KERNEL);
		if (!sglist) {
			ret = -ENOMEM;
			goto out;
		}

		sg_init_table(sglist, nrpages);

		/* Every entry points at the same dummy page. */
		for (i = 0; i < nrpages; i++)
			sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);

		ret = iommu_map_range(domain, temp_iova, sglist, size, cached);
		if (ret) {
			pr_err("%s: could not map extra %lx in domain %p\n",
				__func__, start_iova, domain);
		}

		kfree(sglist);
	} else {
		unsigned long order = get_order(page_size);
		unsigned long aligned_size = ALIGN(size, page_size);
		unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);

		for (i = 0; i < nrpages; i++) {
			ret = iommu_map(domain, temp_iova, phy_addr, page_size,
						cached);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, start_iova, domain, ret);
				ret = -EAGAIN;
				goto out;
			}
			temp_iova += page_size;
		}
	}
	return ret;
out:
	/* Unwind whatever was mapped before the failure. */
	for (; i > 0; --i) {
		temp_iova -= page_size;
		iommu_unmap(domain, temp_iova, page_size);
	}
	return ret;
}

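/*
 * Tear down a mapping created by msm_iommu_map_extra(), one
 * page_size page at a time across the page_size-aligned length.
 */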
void msm_iommu_unmap_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				unsigned long size,
				unsigned long page_size)
{
	int i;
	unsigned long order = get_order(page_size);
	unsigned long aligned_size = ALIGN(size, page_size);
	unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
	unsigned long temp_iova = start_iova;

	for (i = 0; i < nrpages; ++i) {
		iommu_unmap(domain, temp_iova, page_size);
		temp_iova += page_size;
	}
}

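/*
 * Map one physically contiguous region at iova by wrapping it in a
 * single-entry scatterlist and handing that to iommu_map_range().
 */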
static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
				unsigned long iova,
				unsigned long phys,
				unsigned long size,
				int cached)
{
	int ret;
	struct scatterlist *sglist;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= cached ? IOMMU_CACHE : 0;

	sglist = kmalloc(sizeof(*sglist), GFP_KERNEL);
	if (!sglist) {
		ret = -ENOMEM;
		goto err1;
	}

	sg_init_table(sglist, 1);
	sglist->length = size;
	sglist->offset = 0;
	sglist->dma_address = phys;

	ret = iommu_map_range(domain, iova, sglist, size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, iova, domain);
	}

	kfree(sglist);
err1:
	return ret;
}

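/*
 * Allocate an iova from the given domain and partition, then map a
 * physically contiguous buffer of size bytes (which must be a
 * multiple of align) at it.  Without an IOMMU the physical address
 * is handed back unchanged so callers need no special casing.  On
 * success *iova_val holds the mapped address; on failure the iova
 * is released again.
 */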
int msm_iommu_map_contig_buffer(unsigned long phys,
				unsigned int domain_no,
				unsigned int partition_no,
				unsigned long size,
				unsigned long align,
				unsigned long cached,
				unsigned long *iova_val)
{
	unsigned long iova;
	int ret;

	if (size & (align - 1))
		return -EINVAL;

	if (!msm_use_iommu()) {
		*iova_val = phys;
		return 0;
	}

	ret = msm_allocate_iova_address(domain_no, partition_no, size, align,
						&iova);

	if (ret)
		return -ENOMEM;

	ret = msm_iommu_map_iova_phys(msm_get_iommu_domain(domain_no), iova,
					phys, size, cached);

	if (ret)
		msm_free_iova_address(iova, domain_no, partition_no, size);
	else
		*iova_val = iova;

	return ret;
}
EXPORT_SYMBOL(msm_iommu_map_contig_buffer);

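/*
 * Undo msm_iommu_map_contig_buffer(): unmap the range and return the
 * iova to its partition pool.  A no-op when no IOMMU is present.
 */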
void msm_iommu_unmap_contig_buffer(unsigned long iova,
					unsigned int domain_no,
					unsigned int partition_no,
					unsigned long size)
{
	if (!msm_use_iommu())
		return;

	iommu_unmap_range(msm_get_iommu_domain(domain_no), iova, size);
	msm_free_iova_address(iova, domain_no, partition_no, size);
}
EXPORT_SYMBOL(msm_iommu_unmap_contig_buffer);

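/*
 * Look up a registered domain by number.  The rbtree walk is done
 * under domain_mutex; returns NULL when the number is unknown.
 */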
static struct msm_iova_data *find_domain(int domain_num)
{
	struct rb_root *root = &domain_root;
	struct rb_node *p;

	mutex_lock(&domain_mutex);
	p = root->rb_node;

	while (p) {
		struct msm_iova_data *node;

		node = rb_entry(p, struct msm_iova_data, node);
		if (domain_num < node->domain_num)
			p = p->rb_left;
		else if (domain_num > node->domain_num)
			p = p->rb_right;
		else {
			mutex_unlock(&domain_mutex);
			return node;
		}
	}
	mutex_unlock(&domain_mutex);
	return NULL;
}

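/*
 * Insert a domain into the rbtree keyed by domain_num.  Numbers come
 * from atomic_inc_return(), so hitting a duplicate can only be a
 * programming error and is treated as fatal.
 */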
static int add_domain(struct msm_iova_data *node)
{
	struct rb_root *root = &domain_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	mutex_lock(&domain_mutex);
	while (*p) {
		struct msm_iova_data *tmp;
		parent = *p;

		tmp = rb_entry(parent, struct msm_iova_data, node);

		if (node->domain_num < tmp->domain_num)
			p = &(*p)->rb_left;
		else if (node->domain_num > tmp->domain_num)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, root);
	mutex_unlock(&domain_mutex);
	return 0;
}

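/* Return the iommu_domain for a domain number, or NULL if unknown. */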
struct iommu_domain *msm_get_iommu_domain(int domain_num)
{
	struct msm_iova_data *data;

	data = find_domain(domain_num);

	if (data)
		return data->domain;
	else
		return NULL;
}

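/*
 * Carve size bytes, aligned to align, out of the genpool backing the
 * requested partition.  Pools that begin at physical address 0 are
 * stored offset by 4K because genalloc cannot represent address 0;
 * the offset is removed again before the iova is returned.
 */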
int msm_allocate_iova_address(unsigned int iommu_domain,
					unsigned int partition_no,
					unsigned long size,
					unsigned long align,
					unsigned long *iova)
{
	struct msm_iova_data *data;
	struct mem_pool *pool;
	unsigned long va;

	data = find_domain(iommu_domain);

	if (!data)
		return -EINVAL;

	if (partition_no >= data->npools)
		return -EINVAL;

	pool = &data->pools[partition_no];

	if (!pool->gpool)
		return -EINVAL;

	va = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
	if (va) {
		pool->free -= size;
		/* Offset because genpool can't handle 0 addresses */
		if (pool->paddr == 0)
			va -= SZ_4K;
		*iova = va;
		return 0;
	}

	return -ENOMEM;
}

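/*
 * Return an iova allocated with msm_allocate_iova_address() to its
 * partition pool, re-applying the 4K offset for pools that start at
 * physical address 0.
 */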
void msm_free_iova_address(unsigned long iova,
			unsigned int iommu_domain,
			unsigned int partition_no,
			unsigned long size)
{
	struct msm_iova_data *data;
	struct mem_pool *pool;

	data = find_domain(iommu_domain);

	if (!data) {
		WARN(1, "Invalid domain %d\n", iommu_domain);
		return;
	}

	if (partition_no >= data->npools) {
		WARN(1, "Invalid partition %d for domain %d\n",
			partition_no, iommu_domain);
		return;
	}

	pool = &data->pools[partition_no];

	if (!pool->gpool)
		return;

	pool->free += size;

	/* Offset because genpool can't handle 0 addresses */
	if (pool->paddr == 0)
		iova += SZ_4K;

	gen_pool_free(pool->gpool, iova, size);
}

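/*
 * Build a genpool for each nonempty partition in the layout, allocate
 * an iommu_domain, assign the next domain number and insert the
 * result into the rbtree.  Returns the new domain number on success.
 */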
int msm_register_domain(struct msm_iova_layout *layout)
{
	int i;
	struct msm_iova_data *data;
	struct mem_pool *pools;

	if (!layout)
		return -EINVAL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* Zeroed so partitions skipped below keep gpool == NULL. */
	pools = kzalloc(sizeof(struct mem_pool) * layout->npartitions,
			GFP_KERNEL);

	if (!pools)
		goto out;

	for (i = 0; i < layout->npartitions; i++) {
		if (layout->partitions[i].size == 0)
			continue;

		pools[i].gpool = gen_pool_create(PAGE_SHIFT, -1);

		if (!pools[i].gpool)
			continue;

		pools[i].paddr = layout->partitions[i].start;
		pools[i].size = layout->partitions[i].size;

		/*
		 * genalloc can't handle a pool starting at address 0.
		 * For now, solve this problem by offsetting the value
		 * put in by 4k.
		 * gen pool address = actual address + 4k
		 */
		if (pools[i].paddr == 0)
			layout->partitions[i].start += SZ_4K;

		if (gen_pool_add(pools[i].gpool,
			layout->partitions[i].start,
			layout->partitions[i].size, -1)) {
			gen_pool_destroy(pools[i].gpool);
			pools[i].gpool = NULL;
			continue;
		}
	}

	data->pools = pools;
	data->npools = layout->npartitions;
	data->domain_num = atomic_inc_return(&domain_nums);
	data->domain = iommu_domain_alloc(&platform_bus_type,
					  layout->domain_flags);

	add_domain(data);

	return data->domain_num;

out:
	kfree(data);

	return -ENOMEM;
}
EXPORT_SYMBOL(msm_register_domain);

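/*
 * Translate the platform data into msm_iova_layouts, register a
 * domain for each one, then attach every named IOMMU context to its
 * domain.  Per-domain and per-context failures are logged and
 * skipped so the remaining entries can still be set up.
 */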
static int __init iommu_domain_probe(struct platform_device *pdev)
{
	struct iommu_domains_pdata *p = pdev->dev.platform_data;
	int i, j;

	if (!msm_use_iommu())
		return -ENODEV;

	if (!p)
		return -ENODEV;

	for (i = 0; i < p->ndomains; i++) {
		/* Zero-initialized so domain_flags is not stack garbage. */
		struct msm_iova_layout l = {0};
		struct msm_iova_partition *part;
		struct msm_iommu_domain *domains;

		domains = p->domains;
		l.npartitions = domains[i].npools;
		part = kmalloc(
			sizeof(struct msm_iova_partition) * l.npartitions,
				GFP_KERNEL);

		if (!part) {
			pr_info("%s: could not allocate space for domain %d\n",
				__func__, i);
			continue;
		}

		for (j = 0; j < l.npartitions; j++) {
			part[j].start = p->domains[i].iova_pools[j].paddr;
			part[j].size = p->domains[i].iova_pools[j].size;
		}

		l.partitions = part;

		msm_register_domain(&l);

		kfree(part);
	}

	for (i = 0; i < p->nnames; i++) {
		struct device *ctx = msm_iommu_get_ctx(
						p->domain_names[i].name);
		struct iommu_domain *domain;

		if (!ctx)
			continue;

		domain = msm_get_iommu_domain(p->domain_names[i].domain);

		if (!domain)
			continue;

		if (iommu_attach_device(domain, ctx)) {
			WARN(1, "%s: could not attach domain %p to context %s."
				" iommu programming will not occur.\n",
				__func__, domain,
				p->domain_names[i].name);
			continue;
		}
	}

	return 0;
}

static struct platform_driver iommu_domain_driver = {
	.driver = {
		.name = "iommu_domains",
		.owner = THIS_MODULE
	},
};

static int __init msm_subsystem_iommu_init(void)
{
	return platform_driver_probe(&iommu_domain_driver, iommu_domain_probe);
}
device_initcall(msm_subsystem_iommu_init);