/* Copyright (c) 2012 Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/sizes.h>

#include <mach/iommu_hw-v2.h>
#include <mach/iommu.h>

#include "msm_iommu_pagetable.h"

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_MUTEX(msm_iommu_lock);
41struct msm_priv {
42 struct iommu_pt pt;
43 struct list_head list_attached;
44};
45
46static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
47{
48 int ret;
49
50 ret = clk_prepare_enable(drvdata->pclk);
51 if (ret)
52 goto fail;
53
54 if (drvdata->clk) {
55 ret = clk_prepare_enable(drvdata->clk);
56 if (ret)
57 clk_disable_unprepare(drvdata->pclk);
58 }
59fail:
60 return ret;
61}
62
63static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
64{
65 if (drvdata->clk)
66 clk_disable_unprepare(drvdata->clk);
67 clk_disable_unprepare(drvdata->pclk);
68}
69
70static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
71{
72 struct msm_priv *priv = domain->priv;
73 struct msm_iommu_drvdata *iommu_drvdata;
74 struct msm_iommu_ctx_drvdata *ctx_drvdata;
75 int asid;
76
77 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
78 BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
79
80 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
81 BUG_ON(!iommu_drvdata);
82
83 asid = GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base,
84 ctx_drvdata->num);
85
86 SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
87 asid | (va & CB_TLBIVA_VA));
88 mb();
89 }
90
91 return 0;
92}
93
94static int __flush_iotlb(struct iommu_domain *domain)
95{
96 struct msm_priv *priv = domain->priv;
97 struct msm_iommu_drvdata *iommu_drvdata;
98 struct msm_iommu_ctx_drvdata *ctx_drvdata;
99 int asid;
100
101 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
102 BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
103
104 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
105 BUG_ON(!iommu_drvdata);
106
107 asid = GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base,
108 ctx_drvdata->num);
109
110 SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
111 mb();
112 }
113
114 return 0;
115}
116
Sathish Ambleycf045e62012-06-07 12:56:50 -0700117static void __reset_iommu(void __iomem *base)
118{
119 int i;
120
121 SET_ACR(base, 0);
122 SET_NSACR(base, 0);
123 SET_CR2(base, 0);
124 SET_NSCR2(base, 0);
125 SET_GFAR(base, 0);
126 SET_GFSRRESTORE(base, 0);
127 SET_TLBIALLNSNH(base, 0);
128 SET_PMCR(base, 0);
129 SET_SCR1(base, 0);
130 SET_SSDR_N(base, 0, 0);
131
132 for (i = 0; i < MAX_NUM_SMR; i++)
133 SET_SMR_VALID(base, i, 0);
134
135 mb();
136}
137
138static void __program_iommu(void __iomem *base)
139{
140 __reset_iommu(base);
141
142 SET_CR0_SMCFCFG(base, 1);
143 SET_CR0_USFCFG(base, 1);
144 SET_CR0_STALLD(base, 1);
145 SET_CR0_GCFGFIE(base, 1);
146 SET_CR0_GCFGFRE(base, 1);
147 SET_CR0_GFIE(base, 1);
148 SET_CR0_GFRE(base, 1);
149 SET_CR0_CLIENTPD(base, 0);
150 mb(); /* Make sure writes complete before returning */
151}
152
Steve Mucklef132c6c2012-06-06 18:30:57 -0700153static void __reset_context(void __iomem *base, int ctx)
154{
155 SET_ACTLR(base, ctx, 0);
156 SET_FAR(base, ctx, 0);
157 SET_FSRRESTORE(base, ctx, 0);
158 SET_NMRR(base, ctx, 0);
159 SET_PAR(base, ctx, 0);
160 SET_PRRR(base, ctx, 0);
161 SET_SCTLR(base, ctx, 0);
162 SET_TLBIALL(base, ctx, 0);
163 SET_TTBCR(base, ctx, 0);
164 SET_TTBR0(base, ctx, 0);
165 SET_TTBR1(base, ctx, 0);
166 mb();
167}
168
169static void __program_context(void __iomem *base, int ctx, int ncb,
Sathish Ambleycf045e62012-06-07 12:56:50 -0700170 phys_addr_t pgtable, int redirect,
171 u32 *sids, int len)
Steve Mucklef132c6c2012-06-06 18:30:57 -0700172{
173 unsigned int prrr, nmrr;
174 unsigned int pn;
Sathish Ambleycf045e62012-06-07 12:56:50 -0700175 int i, j, found, num = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700176
177 __reset_context(base, ctx);
178
179 pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
180 SET_TTBCR(base, ctx, 0);
181 SET_CB_TTBR0_ADDR(base, ctx, pn);
182
183 /* Enable context fault interrupt */
184 SET_CB_SCTLR_CFIE(base, ctx, 1);
185
186 /* Redirect all cacheable requests to L2 slave port. */
187 SET_CB_ACTLR_BPRCISH(base, ctx, 1);
188 SET_CB_ACTLR_BPRCOSH(base, ctx, 1);
189 SET_CB_ACTLR_BPRCNSH(base, ctx, 1);
190
191 /* Turn on TEX Remap */
192 SET_CB_SCTLR_TRE(base, ctx, 1);
193
194 /* Enable private ASID namespace */
195 SET_CB_SCTLR_ASIDPNE(base, ctx, 1);
196
197 /* Set TEX remap attributes */
198 RCP15_PRRR(prrr);
199 RCP15_NMRR(nmrr);
200 SET_PRRR(base, ctx, prrr);
201 SET_NMRR(base, ctx, nmrr);
202
203 /* Configure page tables as inner-cacheable and shareable to reduce
204 * the TLB miss penalty.
205 */
206 if (redirect) {
207 SET_CB_TTBR0_S(base, ctx, 1);
208 SET_CB_TTBR0_NOS(base, ctx, 1);
209 SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
210 SET_CB_TTBR0_IRGN0(base, ctx, 1);
211 SET_CB_TTBR0_RGN(base, ctx, 1); /* WB, WA */
212 }
213
Sathish Ambleycf045e62012-06-07 12:56:50 -0700214 /* Program the M2V tables for this context */
215 for (i = 0; i < len / sizeof(*sids); i++) {
216 for (; num < MAX_NUM_SMR; num++)
217 if (GET_SMR_VALID(base, num) == 0)
218 break;
219 BUG_ON(num >= MAX_NUM_SMR);
220
221 SET_SMR_VALID(base, num, 1);
222 SET_SMR_MASK(base, num, 0);
223 SET_SMR_ID(base, num, sids[i]);
224
225 /* Set VMID = 0 */
226 SET_S2CR_N(base, num, 0);
227 SET_S2CR_CBNDX(base, num, ctx);
228 /* Set security bit override to be Non-secure */
229 SET_S2CR_NSCFG(base, sids[i], 3);
230
231 SET_CBAR_N(base, ctx, 0);
232 /* Stage 1 Context with Stage 2 bypass */
233 SET_CBAR_TYPE(base, ctx, 1);
234 /* Route page faults to the non-secure interrupt */
235 SET_CBAR_IRPTNDX(base, ctx, 1);
236 }
237
Steve Mucklef132c6c2012-06-06 18:30:57 -0700238 /* Find if this page table is used elsewhere, and re-use ASID */
239 found = 0;
240 for (i = 0; i < ncb; i++)
241 if ((GET_CB_TTBR0_ADDR(base, i) == pn) && (i != ctx)) {
242 SET_CB_CONTEXTIDR_ASID(base, ctx, \
243 GET_CB_CONTEXTIDR_ASID(base, i));
244 found = 1;
245 break;
246 }
247
248 /* If page table is new, find an unused ASID */
249 if (!found) {
250 for (i = 0; i < ncb; i++) {
251 found = 0;
252 for (j = 0; j < ncb; j++) {
253 if (GET_CB_CONTEXTIDR_ASID(base, j) == i &&
254 j != ctx)
255 found = 1;
256 }
257
258 if (!found) {
259 SET_CB_CONTEXTIDR_ASID(base, ctx, i);
260 break;
261 }
262 }
263 BUG_ON(found);
264 }
265
266 /* Enable the MMU */
267 SET_CB_SCTLR_M(base, ctx, 1);
268 mb();
269}
270
271static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
272{
273 struct msm_priv *priv;
274
275 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
276 if (!priv)
277 goto fail_nomem;
278
279#ifdef CONFIG_IOMMU_PGTABLES_L2
280 priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
281#endif
282
283 INIT_LIST_HEAD(&priv->list_attached);
284 if (msm_iommu_pagetable_alloc(&priv->pt))
285 goto fail_nomem;
286
287 domain->priv = priv;
288 return 0;
289
290fail_nomem:
291 kfree(priv);
292 return -ENOMEM;
293}
294
295static void msm_iommu_domain_destroy(struct iommu_domain *domain)
296{
297 struct msm_priv *priv;
298
299 mutex_lock(&msm_iommu_lock);
300 priv = domain->priv;
301 domain->priv = NULL;
302
303 if (priv)
304 msm_iommu_pagetable_free(&priv->pt);
305
306 kfree(priv);
307 mutex_unlock(&msm_iommu_lock);
308}
309
Sathish Ambleycf045e62012-06-07 12:56:50 -0700310static int msm_iommu_ctx_attached(struct device *dev)
311{
312 struct platform_device *pdev;
313 struct device_node *child;
314 struct msm_iommu_ctx_drvdata *ctx;
315
316 for_each_child_of_node(dev->of_node, child) {
317 pdev = of_find_device_by_node(child);
318
319 ctx = dev_get_drvdata(&pdev->dev);
320 if (ctx->attached_domain) {
321 of_node_put(child);
322 return 1;
323 }
324 }
325
326 return 0;
327}
328
Steve Mucklef132c6c2012-06-06 18:30:57 -0700329static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
330{
331 struct msm_priv *priv;
332 struct msm_iommu_drvdata *iommu_drvdata;
333 struct msm_iommu_ctx_drvdata *ctx_drvdata;
334 struct msm_iommu_ctx_drvdata *tmp_drvdata;
Sathish Ambleycf045e62012-06-07 12:56:50 -0700335 u32 sids[MAX_NUM_SMR];
336 int len = 0, ret = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700337
338 mutex_lock(&msm_iommu_lock);
339
340 priv = domain->priv;
341 if (!priv || !dev) {
342 ret = -EINVAL;
343 goto fail;
344 }
345
346 iommu_drvdata = dev_get_drvdata(dev->parent);
347 ctx_drvdata = dev_get_drvdata(dev);
348 if (!iommu_drvdata || !ctx_drvdata) {
349 ret = -EINVAL;
350 goto fail;
351 }
352
353 if (!list_empty(&ctx_drvdata->attached_elm)) {
354 ret = -EBUSY;
355 goto fail;
356 }
357
358 list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
359 if (tmp_drvdata == ctx_drvdata) {
360 ret = -EBUSY;
361 goto fail;
362 }
363
Sathish Ambleycf045e62012-06-07 12:56:50 -0700364 of_get_property(dev->of_node, "qcom,iommu-ctx-sids", &len);
365 BUG_ON(len >= sizeof(sids));
366 if (of_property_read_u32_array(dev->of_node, "qcom,iommu-ctx-sids",
367 sids, len / sizeof(*sids))) {
368 ret = -EINVAL;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700369 goto fail;
Sathish Ambleycf045e62012-06-07 12:56:50 -0700370 }
371
372 if (!msm_iommu_ctx_attached(dev->parent)) {
373 ret = __enable_clocks(iommu_drvdata);
374 if (ret)
375 goto fail;
376 __program_iommu(iommu_drvdata->base);
377 }
Steve Mucklef132c6c2012-06-06 18:30:57 -0700378
379 __program_context(iommu_drvdata->base, ctx_drvdata->num,
380 iommu_drvdata->ncb, __pa(priv->pt.fl_table),
Sathish Ambleycf045e62012-06-07 12:56:50 -0700381 priv->pt.redirect, sids, len);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700382
Steve Mucklef132c6c2012-06-06 18:30:57 -0700383 list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
384 ctx_drvdata->attached_domain = domain;
385
386fail:
387 mutex_unlock(&msm_iommu_lock);
388 return ret;
389}
390
391static void msm_iommu_detach_dev(struct iommu_domain *domain,
392 struct device *dev)
393{
394 struct msm_priv *priv;
395 struct msm_iommu_drvdata *iommu_drvdata;
396 struct msm_iommu_ctx_drvdata *ctx_drvdata;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700397
398 mutex_lock(&msm_iommu_lock);
399 priv = domain->priv;
400 if (!priv || !dev)
401 goto fail;
402
403 iommu_drvdata = dev_get_drvdata(dev->parent);
404 ctx_drvdata = dev_get_drvdata(dev);
405 if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
406 goto fail;
407
Steve Mucklef132c6c2012-06-06 18:30:57 -0700408 SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
409 GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));
410
411 __reset_context(iommu_drvdata->base, ctx_drvdata->num);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700412 list_del_init(&ctx_drvdata->attached_elm);
413 ctx_drvdata->attached_domain = NULL;
414
Sathish Ambleycf045e62012-06-07 12:56:50 -0700415 if (!msm_iommu_ctx_attached(dev->parent))
416 __disable_clocks(iommu_drvdata);
417
Steve Mucklef132c6c2012-06-06 18:30:57 -0700418fail:
419 mutex_unlock(&msm_iommu_lock);
420}
421
422static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
423 phys_addr_t pa, size_t len, int prot)
424{
425 struct msm_priv *priv;
426 int ret = 0;
427
428 mutex_lock(&msm_iommu_lock);
429
430 priv = domain->priv;
431 if (!priv) {
432 ret = -EINVAL;
433 goto fail;
434 }
435
436 ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot);
437 if (ret)
438 goto fail;
439
440 ret = __flush_iotlb_va(domain, va);
441fail:
442 mutex_unlock(&msm_iommu_lock);
443 return ret;
444}
445
446static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
447 size_t len)
448{
449 struct msm_priv *priv;
450 int ret = -ENODEV;
451
452 mutex_lock(&msm_iommu_lock);
453
454 priv = domain->priv;
455 if (!priv)
456 goto fail;
457
458 ret = msm_iommu_pagetable_unmap(&priv->pt, va, len);
459 if (ret < 0)
460 goto fail;
461
462 ret = __flush_iotlb_va(domain, va);
463fail:
464 mutex_unlock(&msm_iommu_lock);
465
466 /* the IOMMU API requires us to return how many bytes were unmapped */
467 len = ret ? 0 : len;
468 return len;
469}
470
471static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
472 struct scatterlist *sg, unsigned int len,
473 int prot)
474{
475 int ret;
476 struct msm_priv *priv;
477
478 mutex_lock(&msm_iommu_lock);
479
480 priv = domain->priv;
481 if (!priv) {
482 ret = -EINVAL;
483 goto fail;
484 }
485
486 ret = msm_iommu_pagetable_map_range(&priv->pt, va, sg, len, prot);
487 if (ret)
488 goto fail;
489
490 __flush_iotlb(domain);
491fail:
492 mutex_unlock(&msm_iommu_lock);
493 return ret;
494}
495
496
497static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
498 unsigned int len)
499{
500 struct msm_priv *priv;
501
502 mutex_lock(&msm_iommu_lock);
503
504 priv = domain->priv;
505 msm_iommu_pagetable_unmap_range(&priv->pt, va, len);
506
507 __flush_iotlb(domain);
508 mutex_unlock(&msm_iommu_lock);
509 return 0;
510}
511
512static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
513 unsigned long va)
514{
515 struct msm_priv *priv;
516 struct msm_iommu_drvdata *iommu_drvdata;
517 struct msm_iommu_ctx_drvdata *ctx_drvdata;
518 unsigned int par;
519 void __iomem *base;
520 phys_addr_t pa = 0;
521 int ctx;
522
523 mutex_lock(&msm_iommu_lock);
524
525 priv = domain->priv;
526 if (list_empty(&priv->list_attached))
527 goto fail;
528
529 ctx_drvdata = list_entry(priv->list_attached.next,
530 struct msm_iommu_ctx_drvdata, attached_elm);
531 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
532
533 base = iommu_drvdata->base;
534 ctx = ctx_drvdata->num;
535
536 SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
537 mb();
538 while (GET_CB_ATSR_ACTIVE(base, ctx))
539 cpu_relax();
540
541 par = GET_PAR(base, ctx);
542 if (par & CB_PAR_F) {
543 pa = 0;
544 } else {
545 /* We are dealing with a supersection */
546 if (par & CB_PAR_SS)
547 pa = (par & 0xFF000000) | (va & 0x00FFFFFF);
548 else /* Upper 20 bits from PAR, lower 12 from VA */
549 pa = (par & 0xFFFFF000) | (va & 0x00000FFF);
550 }
551
552fail:
553 mutex_unlock(&msm_iommu_lock);
554 return pa;
555}
556
557static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
558 unsigned long cap)
559{
560 return 0;
561}
562
563static void print_ctx_regs(void __iomem *base, int ctx, unsigned int fsr)
564{
565 pr_err("FAR = %08x PAR = %08x\n",
566 GET_FAR(base, ctx), GET_PAR(base, ctx));
567 pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
568 (fsr & 0x02) ? "TF " : "",
569 (fsr & 0x04) ? "AFF " : "",
570 (fsr & 0x08) ? "PF " : "",
571 (fsr & 0x10) ? "EF " : "",
572 (fsr & 0x20) ? "TLBMCF " : "",
573 (fsr & 0x40) ? "TLBLKF " : "",
574 (fsr & 0x80) ? "MHF " : "",
575 (fsr & 0x40000000) ? "SS " : "",
576 (fsr & 0x80000000) ? "MULTI " : "");
577
578 pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
579 GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
580 pr_err("TTBR0 = %08x TTBR1 = %08x\n",
581 GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
582 pr_err("SCTLR = %08x ACTLR = %08x\n",
583 GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
584 pr_err("PRRR = %08x NMRR = %08x\n",
585 GET_PRRR(base, ctx), GET_NMRR(base, ctx));
586}
587
588irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
589{
590 struct platform_device *pdev = dev_id;
591 struct msm_iommu_drvdata *drvdata;
592 struct msm_iommu_ctx_drvdata *ctx_drvdata;
593 unsigned int fsr;
594 int ret = IRQ_NONE;
595
596 mutex_lock(&msm_iommu_lock);
597
598 BUG_ON(!pdev);
599
600 drvdata = dev_get_drvdata(pdev->dev.parent);
601 BUG_ON(!drvdata);
602
603 ctx_drvdata = dev_get_drvdata(&pdev->dev);
604 BUG_ON(!ctx_drvdata);
605
606 fsr = GET_FSR(drvdata->base, ctx_drvdata->num);
607 if (fsr) {
608 if (!ctx_drvdata->attached_domain) {
609 pr_err("Bad domain in interrupt handler\n");
610 ret = -ENOSYS;
611 } else
612 ret = report_iommu_fault(ctx_drvdata->attached_domain,
613 &ctx_drvdata->pdev->dev,
614 GET_FAR(drvdata->base, ctx_drvdata->num), 0);
615
616 if (ret == -ENOSYS) {
617 pr_err("Unexpected IOMMU page fault!\n");
618 pr_err("name = %s\n", drvdata->name);
619 pr_err("context = %s (%d)\n", ctx_drvdata->name,
620 ctx_drvdata->num);
621 pr_err("Interesting registers:\n");
622 print_ctx_regs(drvdata->base, ctx_drvdata->num, fsr);
623 }
624
625 SET_FSR(drvdata->base, ctx_drvdata->num, fsr);
626 ret = IRQ_HANDLED;
627 } else
628 ret = IRQ_NONE;
629
630 mutex_unlock(&msm_iommu_lock);
631 return ret;
632}
633
634static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
635{
636 struct msm_priv *priv = domain->priv;
637 return __pa(priv->pt.fl_table);
638}
639
640static struct iommu_ops msm_iommu_ops = {
641 .domain_init = msm_iommu_domain_init,
642 .domain_destroy = msm_iommu_domain_destroy,
643 .attach_dev = msm_iommu_attach_dev,
644 .detach_dev = msm_iommu_detach_dev,
645 .map = msm_iommu_map,
646 .unmap = msm_iommu_unmap,
647 .map_range = msm_iommu_map_range,
648 .unmap_range = msm_iommu_unmap_range,
649 .iova_to_phys = msm_iommu_iova_to_phys,
650 .domain_has_cap = msm_iommu_domain_has_cap,
651 .get_pt_base_addr = msm_iommu_get_pt_base_addr,
652 .pgsize_bitmap = MSM_IOMMU_PGSIZES,
653};
654
655static int __init msm_iommu_init(void)
656{
657 msm_iommu_pagetable_init();
658 bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
659 return 0;
660}
661
662subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU v2 Driver");