blob: 463d6c9545f04606051b28e8aea532d86a01b857 [file] [log] [blame]
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -08001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070011 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/interrupt.h>
20#include <linux/list.h>
21#include <linux/spinlock.h>
22#include <linux/slab.h>
23#include <linux/iommu.h>
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080024#include <linux/clk.h>
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -070025#include <linux/scatterlist.h>
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070026
27#include <asm/cacheflush.h>
28#include <asm/sizes.h>
29
30#include <mach/iommu_hw-8xxx.h>
31#include <mach/iommu.h>
32
/*
 * Read a CP15 coprocessor register into 'reg'.  Arguments mirror the
 * ARM MRC instruction encoding:
 *   MRC <processor>, <op1>, <Rd>, <CRn>, <CRm>, <op2>
 */
#define MRC(reg, processor, op1, crn, crm, op2) \
__asm__ __volatile__ ( \
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

/* PRRR: Primary Region Remap Register (TEX remap) */
#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
/* NMRR: Normal Memory Remap Register (TEX remap) */
#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)

/* Sharability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH 0x0
#define MSM_IOMMU_ATTR_SH 0x4

/* Cacheability attributes of MSM IOMMU mappings;
 * used as indices into msm_iommu_tex_class[]. */
#define MSM_IOMMU_ATTR_NONCACHED 0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA 0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA 0x2
#define MSM_IOMMU_ATTR_CACHED_WT 0x3
51
/*
 * Flush the CPU data-cache lines covering the page-table entries in
 * [start, end) so the IOMMU's table walker, which reads from memory,
 * observes the update.
 */
static inline void clean_pte(unsigned long *start, unsigned long *end)
{
	dmac_flush_range(start, end);
}
Stepan Moskovchenko094475d2011-08-03 13:38:29 -070056
/*
 * TEX-remap class index for each MSM_IOMMU_ATTR_* cacheability value;
 * populated at boot by setup_iommu_tex_classes().
 */
static int msm_iommu_tex_class[4];

/* Serializes all IOMMU register access and page-table manipulation. */
DEFINE_SPINLOCK(msm_iommu_lock);

/* Per-domain private data (hung off iommu_domain->priv). */
struct msm_priv {
	unsigned long *pgtable;		/* 16KB first-level table (kernel VA) */
	int redirect;			/* nonzero: page-table walks go via the
					 * cacheable L2 slave port, so CPU-side
					 * cache cleaning of PTEs is skipped */
	struct list_head list_attached;	/* attached ctx_drvdata list */
};
66
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080067static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
68{
69 int ret;
70
71 ret = clk_enable(drvdata->pclk);
72 if (ret)
73 goto fail;
74
75 if (drvdata->clk) {
76 ret = clk_enable(drvdata->clk);
77 if (ret)
78 clk_disable(drvdata->pclk);
79 }
80fail:
81 return ret;
82}
83
/* Release the clocks taken by __enable_clocks(), core clock first. */
static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->clk)
		clk_disable(drvdata->clk);
	clk_disable(drvdata->pclk);
}
90
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -070091static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
92{
93 struct msm_priv *priv = domain->priv;
94 struct msm_iommu_drvdata *iommu_drvdata;
95 struct msm_iommu_ctx_drvdata *ctx_drvdata;
96 int ret = 0;
97 int asid;
98
99 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
100 if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
101 BUG();
102
103 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
104 if (!iommu_drvdata)
105 BUG();
106
107 ret = __enable_clocks(iommu_drvdata);
108 if (ret)
109 goto fail;
110
111 asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
112 ctx_drvdata->num);
113
114 SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
115 asid | (va & TLBIVA_VA));
116 mb();
117 __disable_clocks(iommu_drvdata);
118 }
119fail:
120 return ret;
121}
122
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700123static int __flush_iotlb(struct iommu_domain *domain)
124{
125 struct msm_priv *priv = domain->priv;
126 struct msm_iommu_drvdata *iommu_drvdata;
127 struct msm_iommu_ctx_drvdata *ctx_drvdata;
128 int ret = 0;
129 int asid;
130
131 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
132 if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
133 BUG();
134
135 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
136 if (!iommu_drvdata)
137 BUG();
138
139 ret = __enable_clocks(iommu_drvdata);
140 if (ret)
141 goto fail;
142
143 asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
144 ctx_drvdata->num);
145
146 SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
147 mb();
148 __disable_clocks(iommu_drvdata);
149 }
150fail:
151 return ret;
152}
153
/*
 * Return context bank 'ctx' to its power-on state by zeroing every
 * programmable register, including fault status, TTBRs and TEX-remap
 * registers.  The trailing mb() posts the writes to the device.
 */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();	/* ensure the register writes have posted */
}
177
/*
 * Program context bank 'ctx' (of 'ncb' banks on this IOMMU) to
 * translate through the first-level page table at physical address
 * 'pgtable'.
 *
 * Also assigns an ASID: if another context bank on the same IOMMU is
 * already using this page table, its ASID is reused so TLB entries can
 * be shared; otherwise the lowest ASID value not used by any other
 * bank is taken.  When 'redirect' is set, the hardware walker fetches
 * page tables as inner/outer write-back write-allocate shareable
 * (i.e. via the L2 slave port).
 *
 * Caller must hold msm_iommu_lock and have the IOMMU clocks enabled.
 */
static void __program_context(void __iomem *base, int ctx, int ncb,
			phys_addr_t pgtable, int redirect)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes: mirror the CPU's PRRR/NMRR so memory
	 * attributes match between CPU and IOMMU mappings. */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx, \
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		/* ncb banks can never exhaust ncb candidate ASIDs */
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();	/* post all register writes before returning */
}
271
Stepan Moskovchenkoff2d3662011-08-31 17:13:32 -0700272static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700273{
274 struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
275
276 if (!priv)
277 goto fail_nomem;
278
279 INIT_LIST_HEAD(&priv->list_attached);
280 priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
281 get_order(SZ_16K));
282
283 if (!priv->pgtable)
284 goto fail_nomem;
285
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700286#ifdef CONFIG_IOMMU_PGTABLES_L2
287 priv->redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
288#endif
289
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700290 memset(priv->pgtable, 0, SZ_16K);
291 domain->priv = priv;
292 return 0;
293
294fail_nomem:
295 kfree(priv);
296 return -ENOMEM;
297}
298
299static void msm_iommu_domain_destroy(struct iommu_domain *domain)
300{
301 struct msm_priv *priv;
302 unsigned long flags;
303 unsigned long *fl_table;
304 int i;
305
306 spin_lock_irqsave(&msm_iommu_lock, flags);
307 priv = domain->priv;
308 domain->priv = NULL;
309
310 if (priv) {
311 fl_table = priv->pgtable;
312
313 for (i = 0; i < NUM_FL_PTE; i++)
314 if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
315 free_page((unsigned long) __va(((fl_table[i]) &
316 FL_BASE_MASK)));
317
318 free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
319 priv->pgtable = NULL;
320 }
321
322 kfree(priv);
323 spin_unlock_irqrestore(&msm_iommu_lock, flags);
324}
325
/*
 * Attach a context device to a domain.
 *
 * 'dev' is an IOMMU context device; its parent holds the
 * msm_iommu_drvdata for the owning IOMMU core.  Programs the hardware
 * context with this domain's page table and links the context onto the
 * domain's attached list.
 *
 * Returns 0 on success, -EINVAL for missing arguments/drvdata, -EBUSY
 * if the context is already attached (to any domain), or a
 * clock-enable error code.
 */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* Parent device carries the IOMMU-core driver data. */
	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* Context already attached to some domain? */
	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	/* Already attached to this domain? */
	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	/* Clocks must be on while programming the context registers. */
	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_dev->num, iommu_drvdata->ncb,
			  __pa(priv->pgtable), priv->redirect);

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
379
/*
 * Detach a context device from its domain: invalidate that context's
 * TLB entries (by its ASID), reset the context bank, and unlink it
 * from the domain's attached list.
 *
 * Note: bad arguments and clock-enable failures are silently ignored
 * because the detach path returns void; on such failure the context
 * stays attached.
 */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Flush this context's ASID from the TLB before tearing it down. */
	SET_TLBIASID(iommu_drvdata->base, ctx_dev->num,
		     GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_dev->num));

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
417
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700418static int __get_pgprot(int prot, int len)
419{
420 unsigned int pgprot;
Stepan Moskovchenko6ee3be82011-11-08 15:24:53 -0800421 int tex;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700422
Stepan Moskovchenko6ee3be82011-11-08 15:24:53 -0800423 if (prot & IOMMU_CACHE)
424 tex = (pgprot_kernel >> 2) & 0x07;
425 else
426 tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700427
428 if (tex < 0 || tex > NUM_TEX_CLASS - 1)
429 return 0;
430
431 if (len == SZ_16M || len == SZ_1M) {
Stepan Moskovchenko6ee3be82011-11-08 15:24:53 -0800432 pgprot = FL_SHARED;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700433 pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
434 pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
435 pgprot |= tex & 0x04 ? FL_TEX0 : 0;
436 } else {
Stepan Moskovchenko6ee3be82011-11-08 15:24:53 -0800437 pgprot = SL_SHARED;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700438 pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
439 pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
440 pgprot |= tex & 0x04 ? SL_TEX0 : 0;
441 }
442
443 return pgprot;
444}
445
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700446static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
447 phys_addr_t pa, int order, int prot)
448{
449 struct msm_priv *priv;
450 unsigned long flags;
451 unsigned long *fl_table;
452 unsigned long *fl_pte;
453 unsigned long fl_offset;
454 unsigned long *sl_table;
455 unsigned long *sl_pte;
456 unsigned long sl_offset;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800457 unsigned int pgprot;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700458 size_t len = 0x1000UL << order;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700459 int ret = 0;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700460
461 spin_lock_irqsave(&msm_iommu_lock, flags);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700462
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800463 priv = domain->priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700464 if (!priv) {
465 ret = -EINVAL;
466 goto fail;
467 }
468
469 fl_table = priv->pgtable;
470
471 if (len != SZ_16M && len != SZ_1M &&
472 len != SZ_64K && len != SZ_4K) {
473 pr_debug("Bad size: %d\n", len);
474 ret = -EINVAL;
475 goto fail;
476 }
477
478 if (!fl_table) {
479 pr_debug("Null page table\n");
480 ret = -EINVAL;
481 goto fail;
482 }
483
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700484 pgprot = __get_pgprot(prot, len);
485
486 if (!pgprot) {
487 ret = -EINVAL;
488 goto fail;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800489 }
490
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700491 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
492 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
493
494 if (len == SZ_16M) {
495 int i = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700496
497 for (i = 0; i < 16; i++)
498 if (*(fl_pte+i)) {
499 ret = -EBUSY;
500 goto fail;
501 }
502
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700503 for (i = 0; i < 16; i++)
504 *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
505 FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800506 FL_SHARED | FL_NG | pgprot;
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700507 if (!priv->redirect)
508 clean_pte(fl_pte, fl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700509 }
510
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700511 if (len == SZ_1M) {
512 if (*fl_pte) {
513 ret = -EBUSY;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700514 goto fail;
515 }
516
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700517 *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
518 FL_TYPE_SECT | FL_SHARED | pgprot;
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700519 if (!priv->redirect)
520 clean_pte(fl_pte, fl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700521 }
522
523 /* Need a 2nd level table */
524 if (len == SZ_4K || len == SZ_64K) {
525
526 if (*fl_pte == 0) {
527 unsigned long *sl;
528 sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
529 get_order(SZ_4K));
530
531 if (!sl) {
532 pr_debug("Could not allocate second level table\n");
533 ret = -ENOMEM;
534 goto fail;
535 }
536 memset(sl, 0, SZ_4K);
537
538 *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
539 FL_TYPE_TABLE);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700540
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700541 if (!priv->redirect)
542 clean_pte(fl_pte, fl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700543 }
544
545 if (!(*fl_pte & FL_TYPE_TABLE)) {
546 ret = -EBUSY;
547 goto fail;
548 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700549 }
550
551 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
552 sl_offset = SL_OFFSET(va);
553 sl_pte = sl_table + sl_offset;
554
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700555 if (len == SZ_4K) {
556 if (*sl_pte) {
557 ret = -EBUSY;
558 goto fail;
559 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700560
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800561 *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800562 SL_SHARED | SL_TYPE_SMALL | pgprot;
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700563 if (!priv->redirect)
564 clean_pte(sl_pte, sl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700565 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700566
567 if (len == SZ_64K) {
568 int i;
569
570 for (i = 0; i < 16; i++)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700571 if (*(sl_pte+i)) {
572 ret = -EBUSY;
573 goto fail;
574 }
575
576 for (i = 0; i < 16; i++)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700577 *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800578 SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700579
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700580 if (!priv->redirect)
581 clean_pte(sl_pte, sl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700582 }
583
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -0700584 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700585fail:
586 spin_unlock_irqrestore(&msm_iommu_lock, flags);
587 return ret;
588}
589
590static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
591 int order)
592{
593 struct msm_priv *priv;
594 unsigned long flags;
595 unsigned long *fl_table;
596 unsigned long *fl_pte;
597 unsigned long fl_offset;
598 unsigned long *sl_table;
599 unsigned long *sl_pte;
600 unsigned long sl_offset;
601 size_t len = 0x1000UL << order;
602 int i, ret = 0;
603
604 spin_lock_irqsave(&msm_iommu_lock, flags);
605
606 priv = domain->priv;
607
608 if (!priv) {
609 ret = -ENODEV;
610 goto fail;
611 }
612
613 fl_table = priv->pgtable;
614
615 if (len != SZ_16M && len != SZ_1M &&
616 len != SZ_64K && len != SZ_4K) {
617 pr_debug("Bad length: %d\n", len);
618 ret = -EINVAL;
619 goto fail;
620 }
621
622 if (!fl_table) {
623 pr_debug("Null page table\n");
624 ret = -EINVAL;
625 goto fail;
626 }
627
628 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
629 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
630
631 if (*fl_pte == 0) {
632 pr_debug("First level PTE is 0\n");
633 ret = -ENODEV;
634 goto fail;
635 }
636
637 /* Unmap supersection */
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700638 if (len == SZ_16M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700639 for (i = 0; i < 16; i++)
640 *(fl_pte+i) = 0;
641
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700642 if (!priv->redirect)
643 clean_pte(fl_pte, fl_pte + 16);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700644 }
645
646 if (len == SZ_1M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700647 *fl_pte = 0;
648
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700649 if (!priv->redirect)
650 clean_pte(fl_pte, fl_pte + 1);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700651 }
652
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700653 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
654 sl_offset = SL_OFFSET(va);
655 sl_pte = sl_table + sl_offset;
656
657 if (len == SZ_64K) {
658 for (i = 0; i < 16; i++)
659 *(sl_pte+i) = 0;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700660
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700661 if (!priv->redirect)
662 clean_pte(sl_pte, sl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700663 }
664
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700665 if (len == SZ_4K) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700666 *sl_pte = 0;
667
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700668 if (!priv->redirect)
669 clean_pte(sl_pte, sl_pte + 1);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700670 }
671
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700672 if (len == SZ_4K || len == SZ_64K) {
673 int used = 0;
674
675 for (i = 0; i < NUM_SL_PTE; i++)
676 if (sl_table[i])
677 used = 1;
678 if (!used) {
679 free_page((unsigned long)sl_table);
680 *fl_pte = 0;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700681
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700682 if (!priv->redirect)
683 clean_pte(fl_pte, fl_pte + 1);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700684 }
685 }
686
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -0700687 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700688fail:
689 spin_unlock_irqrestore(&msm_iommu_lock, flags);
690 return ret;
691}
692
/*
 * Map a scatterlist of physical chunks at virtual address 'va' using
 * 4K second-level pages only.
 *
 * 'len' must be a multiple of SZ_4K (BUG otherwise).  Second-level
 * tables are allocated on demand; each run of freshly written PTEs is
 * cache-cleaned in one go ([sl_start, sl_offset)) unless the domain
 * uses L2-redirected page tables.  Ends with a full per-ASID TLB flush
 * of every attached context.
 *
 * NOTE(review): *fl_pte is assumed to be either empty or a coarse-table
 * descriptor; an existing section/supersection mapping in the range is
 * not detected here -- confirm callers never mix section and page
 * mappings over the same range.
 *
 * Returns 0 on success or -ENOMEM if a second-level table allocation
 * fails (entries already written are not rolled back).
 */
static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	unsigned int pa;
	unsigned int offset = 0;
	unsigned int pgprot;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_offset, sl_start;
	unsigned long flags;
	unsigned int chunk_offset = 0;
	unsigned int chunk_pa;
	int ret = 0;
	struct msm_priv *priv;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	pgprot = __get_pgprot(prot, SZ_4K);

	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);

	chunk_pa = sg_phys(sg);

	while (offset < len) {
		/* Set up a 2nd level page table if one doesn't exist */
		if (*fl_pte == 0) {
			sl_table = (unsigned long *)
				 __get_free_pages(GFP_ATOMIC, get_order(SZ_4K));

			if (!sl_table) {
				pr_debug("Could not allocate second level table\n");
				ret = -ENOMEM;
				goto fail;
			}

			memset(sl_table, 0, SZ_4K);
			*fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
							    FL_TYPE_TABLE);
			if (!priv->redirect)
				clean_pte(fl_pte, fl_pte + 1);
		} else
			sl_table = (unsigned long *)
					       __va(((*fl_pte) & FL_BASE_MASK));

		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {
			pa = chunk_pa + chunk_offset;
			sl_table[sl_offset] = (pa & SL_BASE_MASK_SMALL) |
					      pgprot | SL_AP0 | SL_AP1 | SL_NG |
					      SL_SHARED | SL_TYPE_SMALL;
			sl_offset++;
			offset += SZ_4K;

			chunk_offset += SZ_4K;

			/* Current sg chunk exhausted: advance to the next. */
			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				chunk_pa = sg_phys(sg);
			}
		}

		if (!priv->redirect)
			clean_pte(sl_table + sl_start, sl_table + sl_offset);

		fl_pte++;
		sl_offset = 0;
	}
	__flush_iotlb(domain);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
788
789
/*
 * Unmap 'len' bytes (multiple of SZ_4K, BUG otherwise) of 4K mappings
 * starting at 'va', freeing any second-level table that becomes empty.
 * Ends with a full per-ASID TLB flush of every attached context.
 *
 * NOTE(review): every first-level entry in the range is assumed to
 * point at a second-level table; a hole (*fl_pte == 0) would make the
 * memset scribble on __va(0) -- confirm callers only unmap ranges they
 * previously mapped with msm_iommu_map_range().
 *
 * Always returns 0.
 */
static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	unsigned long flags;
	int used, i;
	struct msm_priv *priv;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_start = SL_OFFSET(va);

	while (offset < len) {
		sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
		/* Clear up to the end of this second-level table or the
		 * end of the requested range, whichever comes first. */
		sl_end = ((len - offset) / SZ_4K) + sl_start;

		if (sl_end > NUM_SL_PTE)
			sl_end = NUM_SL_PTE;

		memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
		if (!priv->redirect)
			clean_pte(sl_table + sl_start, sl_table + sl_end);

		offset += (sl_end - sl_start) * SZ_4K;

		/* Unmap and free the 2nd level table if all mappings in it
		 * were removed. This saves memory, but the table will need
		 * to be re-allocated the next time someone tries to map these
		 * VAs.
		 */
		used = 0;

		/* If we just unmapped the whole table, don't bother
		 * seeing if there are still used entries left.
		 */
		if (sl_end - sl_start != NUM_SL_PTE)
			for (i = 0; i < NUM_SL_PTE; i++)
				if (sl_table[i]) {
					used = 1;
					break;
				}
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			if (!priv->redirect)
				clean_pte(fl_pte, fl_pte + 1);
		}

		sl_start = 0;
		fl_pte++;
	}

	__flush_iotlb(domain);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return 0;
}
860
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700861static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
862 unsigned long va)
863{
864 struct msm_priv *priv;
865 struct msm_iommu_drvdata *iommu_drvdata;
866 struct msm_iommu_ctx_drvdata *ctx_drvdata;
867 unsigned int par;
868 unsigned long flags;
869 void __iomem *base;
870 phys_addr_t ret = 0;
871 int ctx;
872
873 spin_lock_irqsave(&msm_iommu_lock, flags);
874
875 priv = domain->priv;
876 if (list_empty(&priv->list_attached))
877 goto fail;
878
879 ctx_drvdata = list_entry(priv->list_attached.next,
880 struct msm_iommu_ctx_drvdata, attached_elm);
881 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
882
883 base = iommu_drvdata->base;
884 ctx = ctx_drvdata->num;
885
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800886 ret = __enable_clocks(iommu_drvdata);
887 if (ret)
888 goto fail;
889
Stepan Moskovchenkob0e78082011-02-28 16:04:55 -0800890 SET_V2PPR(base, ctx, va & V2Pxx_VA);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700891
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700892 mb();
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700893 par = GET_PAR(base, ctx);
894
895 /* We are dealing with a supersection */
896 if (GET_NOFAULT_SS(base, ctx))
897 ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
898 else /* Upper 20 bits from PAR, lower 12 from VA */
899 ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
900
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800901 if (GET_FAULT(base, ctx))
902 ret = 0;
903
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800904 __disable_clocks(iommu_drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700905fail:
906 spin_unlock_irqrestore(&msm_iommu_lock, flags);
907 return ret;
908}
909
/* This driver advertises no optional IOMMU capabilities. */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}
915
/*
 * Dump the fault-related registers of context bank 'ctx', decoding the
 * FSR status bits into human-readable flags.  Caller must have the
 * IOMMU clocks enabled.
 */
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
942
/*
 * Context-fault interrupt handler for one IOMMU core.  Scans every
 * context bank, dumps the registers of any bank reporting a fault, and
 * clears its fault status (FSR).
 *
 * NOTE(review): this always returns 0 (IRQ_NONE), even after handling
 * a fault -- if the line is shared or the spurious-IRQ detector is
 * active this looks wrong; consider returning IRQ_HANDLED.  Confirm
 * against how the IRQ is requested in the probe code.
 */
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_drvdata *drvdata = dev_id;
	void __iomem *base;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!drvdata) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	base = drvdata->base;

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int) base);
	pr_err("name = %s\n", drvdata->name);

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	/* Check every context bank for a pending fault. */
	for (i = 0; i < drvdata->ncb; i++) {
		fsr = GET_FSR(base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, i);
			/* Acknowledge/clear the fault status. */
			SET_FSR(base, i, 0x4000000F);
		}
	}
	__disable_clocks(drvdata);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}
981
Shubhraprakash Das4c436f22011-12-02 18:01:57 -0700982static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
983{
984 struct msm_priv *priv = domain->priv;
985 return __pa(priv->pgtable);
986}
987
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700988static struct iommu_ops msm_iommu_ops = {
989 .domain_init = msm_iommu_domain_init,
990 .domain_destroy = msm_iommu_domain_destroy,
991 .attach_dev = msm_iommu_attach_dev,
992 .detach_dev = msm_iommu_detach_dev,
993 .map = msm_iommu_map,
994 .unmap = msm_iommu_unmap,
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700995 .map_range = msm_iommu_map_range,
996 .unmap_range = msm_iommu_unmap_range,
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700997 .iova_to_phys = msm_iommu_iova_to_phys,
Shubhraprakash Das4c436f22011-12-02 18:01:57 -0700998 .domain_has_cap = msm_iommu_domain_has_cap,
999 .get_pt_base_addr = msm_iommu_get_pt_base_addr
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001000};
1001
Stepan Moskovchenko100832c2010-11-15 18:20:08 -08001002static int __init get_tex_class(int icp, int ocp, int mt, int nos)
1003{
1004 int i = 0;
1005 unsigned int prrr = 0;
1006 unsigned int nmrr = 0;
1007 int c_icp, c_ocp, c_mt, c_nos;
1008
1009 RCP15_PRRR(prrr);
1010 RCP15_NMRR(nmrr);
1011
1012 for (i = 0; i < NUM_TEX_CLASS; i++) {
1013 c_nos = PRRR_NOS(prrr, i);
1014 c_mt = PRRR_MT(prrr, i);
1015 c_icp = NMRR_ICP(nmrr, i);
1016 c_ocp = NMRR_OCP(nmrr, i);
1017
1018 if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
1019 return i;
1020 }
1021
1022 return -ENODEV;
1023}
1024
1025static void __init setup_iommu_tex_classes(void)
1026{
1027 msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
1028 get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
1029
1030 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
1031 get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
1032
1033 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
1034 get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
1035
1036 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
1037 get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
1038}
1039
/*
 * Module init: bail out on SoC revisions without a usable IOMMU,
 * otherwise probe the CPU TEX-remap classes and register this driver's
 * operations with the IOMMU core.
 */
static int __init msm_iommu_init(void)
{
	if (!msm_soc_version_supports_iommu())
		return -ENODEV;

	setup_iommu_tex_classes();
	register_iommu(&msm_iommu_ops);
	return 0;
}
1049
/* Register at subsys level so IOMMU clients probing later find the ops. */
subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");