blob: 34c16d186dfc365b6b07b4effcc0aca8dcbd5739 [file] [log] [blame]
Olav Hauganab77b1b2012-02-28 09:19:22 -08001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <mach/msm_subsystem_map.h>
14#include <linux/memory_alloc.h>
15#include <linux/iommu.h>
Laura Abbott0577d7b2012-04-17 11:14:30 -070016#include <linux/platform_device.h>
Olav Haugan16cdb412012-03-27 13:02:17 -070017#include <linux/vmalloc.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <asm/sizes.h>
19#include <asm/page.h>
20#include <linux/init.h>
21#include <mach/iommu.h>
22#include <mach/iommu_domains.h>
Laura Abbott9f4a8e62011-08-29 19:08:07 -070023#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024
/* dummy 64K for overmapping */
/*
 * Backing storage for msm_iommu_map_extra(): every "extra" iova page is
 * mapped onto a page_size-aligned window inside this buffer.  Sized at
 * 2*64K so a 64K-aligned window always fits; the trailing -4 presumably
 * nudges the linker placement off an exact boundary -- TODO confirm.
 */
char iommu_dummy[2*SZ_64K-4];
Laura Abbotte956cce2011-10-25 13:33:20 -070027
/* Runtime view of the iommu domains handed in through platform data. */
struct msm_iommu_domain_state {
	struct msm_iommu_domain *domains;	/* array of ndomains entries */
	int ndomains;				/* number of valid entries */
};

/* Populated once in iommu_domain_probe(); read by the accessors below. */
static struct msm_iommu_domain_state domain_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070034
Laura Abbotte956cce2011-10-25 13:33:20 -070035int msm_iommu_map_extra(struct iommu_domain *domain,
36 unsigned long start_iova,
37 unsigned long size,
Olav Haugan8726caf2012-05-10 15:11:35 -070038 unsigned long page_size,
Laura Abbotte956cce2011-10-25 13:33:20 -070039 int cached)
40{
Olav Haugan8726caf2012-05-10 15:11:35 -070041 int i, ret_value = 0;
42 unsigned long order = get_order(page_size);
43 unsigned long aligned_size = ALIGN(size, page_size);
44 unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
45 unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
46 unsigned long temp_iova = start_iova;
Laura Abbotte956cce2011-10-25 13:33:20 -070047
Olav Haugan8726caf2012-05-10 15:11:35 -070048 for (i = 0; i < nrpages; i++) {
49 int ret = iommu_map(domain, temp_iova, phy_addr, order, cached);
50 if (ret) {
51 pr_err("%s: could not map %lx in domain %p, error: %d\n",
52 __func__, start_iova, domain, ret);
53 ret_value = -EAGAIN;
54 goto out;
55 }
56 temp_iova += page_size;
Laura Abbotte956cce2011-10-25 13:33:20 -070057 }
Olav Haugan8726caf2012-05-10 15:11:35 -070058 return ret_value;
59out:
60 for (; i > 0; --i) {
61 temp_iova -= page_size;
62 iommu_unmap(domain, start_iova, order);
Olav Haugan16cdb412012-03-27 13:02:17 -070063 }
Olav Haugan8726caf2012-05-10 15:11:35 -070064 return ret_value;
65}
Laura Abbotte956cce2011-10-25 13:33:20 -070066
Olav Haugan8726caf2012-05-10 15:11:35 -070067void msm_iommu_unmap_extra(struct iommu_domain *domain,
68 unsigned long start_iova,
69 unsigned long size,
70 unsigned long page_size)
71{
72 int i;
73 unsigned long order = get_order(page_size);
74 unsigned long aligned_size = ALIGN(size, page_size);
75 unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
76 unsigned long temp_iova = start_iova;
77
78 for (i = 0; i < nrpages; ++i) {
79 iommu_unmap(domain, temp_iova, order);
80 temp_iova += page_size;
81 }
Laura Abbotte956cce2011-10-25 13:33:20 -070082}
83
Laura Abbottd027fdb2012-04-17 16:22:24 -070084static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
85 unsigned long iova,
86 unsigned long phys,
87 unsigned long size,
88 int cached)
89{
90 int ret;
91 struct scatterlist *sglist;
92
93 sglist = vmalloc(sizeof(*sglist));
94 if (!sglist) {
95 ret = -ENOMEM;
96 goto err1;
97 }
98
99 sg_init_table(sglist, 1);
100 sglist->length = size;
101 sglist->offset = 0;
102 sglist->dma_address = phys;
103
104 ret = iommu_map_range(domain, iova, sglist, size, cached);
105 if (ret) {
106 pr_err("%s: could not map extra %lx in domain %p\n",
107 __func__, iova, domain);
108 }
109
110 vfree(sglist);
111err1:
112 return ret;
113
114}
115
116int msm_iommu_map_contig_buffer(unsigned long phys,
117 unsigned int domain_no,
118 unsigned int partition_no,
119 unsigned long size,
120 unsigned long align,
121 unsigned long cached,
122 unsigned long *iova_val)
123{
124 unsigned long iova;
125 int ret;
126
127 if (size & (align - 1))
128 return -EINVAL;
129
130 iova = msm_allocate_iova_address(domain_no, partition_no, size, align);
131
132 if (!iova)
133 return -ENOMEM;
134
135 ret = msm_iommu_map_iova_phys(msm_get_iommu_domain(domain_no), iova,
136 phys, size, cached);
137
138 if (ret)
139 msm_free_iova_address(iova, domain_no, partition_no, size);
140 else
141 *iova_val = iova;
142
143 return ret;
144}
145
/*
 * msm_iommu_unmap_contig_buffer() - undo msm_iommu_map_contig_buffer():
 * remove the mapping, then return the iova range to its pool.
 */
void msm_iommu_unmap_contig_buffer(unsigned long iova,
				unsigned int domain_no,
				unsigned int partition_no,
				unsigned long size)
{
	struct iommu_domain *domain = msm_get_iommu_domain(domain_no);

	iommu_unmap_range(domain, iova, size);
	msm_free_iova_address(iova, domain_no, partition_no, size);
}
Laura Abbotte956cce2011-10-25 13:33:20 -0700154
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700155struct iommu_domain *msm_get_iommu_domain(int domain_num)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700156{
Laura Abbott0577d7b2012-04-17 11:14:30 -0700157 if (domain_num >= 0 && domain_num < domain_state.ndomains)
158 return domain_state.domains[domain_num].domain;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700159 else
160 return NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700161}
162
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700163unsigned long msm_allocate_iova_address(unsigned int iommu_domain,
164 unsigned int partition_no,
165 unsigned long size,
166 unsigned long align)
167{
168 struct mem_pool *pool;
169 unsigned long iova;
170
Laura Abbott0577d7b2012-04-17 11:14:30 -0700171 if (iommu_domain >= domain_state.ndomains)
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700172 return 0;
173
Laura Abbott0577d7b2012-04-17 11:14:30 -0700174 if (partition_no >= domain_state.domains[iommu_domain].npools)
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700175 return 0;
176
Laura Abbott0577d7b2012-04-17 11:14:30 -0700177 pool = &domain_state.domains[iommu_domain].iova_pools[partition_no];
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700178
179 if (!pool->gpool)
180 return 0;
181
182 iova = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
183 if (iova)
184 pool->free -= size;
185
186 return iova;
187}
188
189void msm_free_iova_address(unsigned long iova,
190 unsigned int iommu_domain,
191 unsigned int partition_no,
192 unsigned long size)
193{
194 struct mem_pool *pool;
195
Laura Abbott0577d7b2012-04-17 11:14:30 -0700196 if (iommu_domain >= domain_state.ndomains) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700197 WARN(1, "Invalid domain %d\n", iommu_domain);
198 return;
199 }
200
Laura Abbott0577d7b2012-04-17 11:14:30 -0700201 if (partition_no >= domain_state.domains[iommu_domain].npools) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700202 WARN(1, "Invalid partition %d for domain %d\n",
203 partition_no, iommu_domain);
204 return;
205 }
206
Laura Abbott0577d7b2012-04-17 11:14:30 -0700207 pool = &domain_state.domains[iommu_domain].iova_pools[partition_no];
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700208
209 if (!pool)
210 return;
211
212 pool->free += size;
213 gen_pool_free(pool->gpool, iova, size);
214}
215
216int msm_use_iommu()
217{
Laura Abbott0577d7b2012-04-17 11:14:30 -0700218 /*
219 * If there are no domains, don't bother trying to use the iommu
220 */
221 return domain_state.ndomains && iommu_found();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700222}
223
Laura Abbott0577d7b2012-04-17 11:14:30 -0700224static int __init iommu_domain_probe(struct platform_device *pdev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700225{
Laura Abbott0577d7b2012-04-17 11:14:30 -0700226 struct iommu_domains_pdata *p = pdev->dev.platform_data;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700227 int i, j;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700228
Laura Abbott0577d7b2012-04-17 11:14:30 -0700229 if (!p)
230 return -ENODEV;
231
232 domain_state.domains = p->domains;
233 domain_state.ndomains = p->ndomains;
234
235 for (i = 0; i < domain_state.ndomains; i++) {
236 domain_state.domains[i].domain = iommu_domain_alloc(
237 p->domain_alloc_flags);
238 if (!domain_state.domains[i].domain)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700239 continue;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700240
Laura Abbott0577d7b2012-04-17 11:14:30 -0700241 for (j = 0; j < domain_state.domains[i].npools; j++) {
242 struct mem_pool *pool = &domain_state.domains[i].
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700243 iova_pools[j];
244 mutex_init(&pool->pool_mutex);
Olav Haugan2d191032012-02-28 09:46:31 -0800245 if (pool->size) {
246 pool->gpool = gen_pool_create(PAGE_SHIFT, -1);
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700247
Olav Haugan2d191032012-02-28 09:46:31 -0800248 if (!pool->gpool) {
249 pr_err("%s: could not allocate pool\n",
250 __func__);
251 pr_err("%s: domain %d iova space %d\n",
252 __func__, i, j);
253 continue;
254 }
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700255
Olav Haugan2d191032012-02-28 09:46:31 -0800256 if (gen_pool_add(pool->gpool, pool->paddr,
257 pool->size, -1)) {
258 pr_err("%s: could not add memory\n",
259 __func__);
260 pr_err("%s: domain %d pool %d\n",
261 __func__, i, j);
262 gen_pool_destroy(pool->gpool);
263 pool->gpool = NULL;
264 continue;
265 }
266 } else {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700267 pool->gpool = NULL;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700268 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700269 }
270 }
271
Laura Abbott0577d7b2012-04-17 11:14:30 -0700272 for (i = 0; i < p->nnames; i++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700273 int domain_idx;
274 struct device *ctx = msm_iommu_get_ctx(
Laura Abbott0577d7b2012-04-17 11:14:30 -0700275 p->domain_names[i].name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700276
277 if (!ctx)
278 continue;
279
Laura Abbott0577d7b2012-04-17 11:14:30 -0700280 domain_idx = p->domain_names[i].domain;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700281
Laura Abbott0577d7b2012-04-17 11:14:30 -0700282 if (!domain_state.domains[domain_idx].domain)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700283 continue;
284
Laura Abbott0577d7b2012-04-17 11:14:30 -0700285 if (iommu_attach_device(domain_state.domains[domain_idx].domain,
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700286 ctx)) {
287 WARN(1, "%s: could not attach domain %d to context %s."
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700288 " iommu programming will not occur.\n",
289 __func__, domain_idx,
Laura Abbott0577d7b2012-04-17 11:14:30 -0700290 p->domain_names[i].name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700291 continue;
292 }
293 }
294
295 return 0;
296}
Laura Abbott0577d7b2012-04-17 11:14:30 -0700297
/*
 * No .probe member here on purpose: the probe routine is passed to
 * platform_driver_probe() below, which binds it exactly once and allows
 * it to live in __init memory.
 */
static struct platform_driver iommu_domain_driver = {
	.driver		= {
		.name	= "iommu_domains",
		.owner	= THIS_MODULE
	},
};
304
/* Register the driver and run the one-shot probe at device initcall time. */
static int __init msm_subsystem_iommu_init(void)
{
	return platform_driver_probe(&iommu_domain_driver, iommu_domain_probe);
}
device_initcall(msm_subsystem_iommu_init);