blob: 15de300143a48160cc08d1581b3259cff1693d25 [file] [log] [blame]
/* Copyright (c) 2012 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/interrupt.h>
20#include <linux/list.h>
21#include <linux/mutex.h>
22#include <linux/slab.h>
23#include <linux/iommu.h>
24#include <linux/clk.h>
25#include <linux/scatterlist.h>
Sathish Ambleycf045e62012-06-07 12:56:50 -070026#include <linux/of.h>
27#include <linux/of_device.h>
Stepan Moskovchenko6751acc2012-06-21 17:36:47 -070028#include <linux/regulator/consumer.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070029#include <asm/sizes.h>
30
31#include <mach/iommu_hw-v2.h>
32#include <mach/iommu.h>
33
34#include "msm_iommu_pagetable.h"
35
36/* bitmap of the page sizes currently supported */
37#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
38
39static DEFINE_MUTEX(msm_iommu_lock);
40
/* Per-domain private state: the domain's page table plus the list of
 * context-bank devices (msm_iommu_ctx_drvdata) currently attached to it.
 */
struct msm_priv {
	struct iommu_pt pt;		/* page-table bookkeeping for this domain */
	struct list_head list_attached;	/* linked via ctx_drvdata->attached_elm */
};
45
46static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
47{
48 int ret;
49
50 ret = clk_prepare_enable(drvdata->pclk);
51 if (ret)
52 goto fail;
53
Stepan Moskovchenko17ae71e2012-07-24 19:24:14 -070054 ret = clk_prepare_enable(drvdata->clk);
55 if (ret)
56 clk_disable_unprepare(drvdata->pclk);
57
58 if (drvdata->aclk) {
59 ret = clk_prepare_enable(drvdata->aclk);
60 if (ret) {
61 clk_disable_unprepare(drvdata->clk);
Steve Mucklef132c6c2012-06-06 18:30:57 -070062 clk_disable_unprepare(drvdata->pclk);
Stepan Moskovchenko17ae71e2012-07-24 19:24:14 -070063 }
Steve Mucklef132c6c2012-06-06 18:30:57 -070064 }
65fail:
66 return ret;
67}
68
69static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
70{
Stepan Moskovchenko17ae71e2012-07-24 19:24:14 -070071 if (drvdata->aclk)
72 clk_disable_unprepare(drvdata->aclk);
73 clk_disable_unprepare(drvdata->clk);
Steve Mucklef132c6c2012-06-06 18:30:57 -070074 clk_disable_unprepare(drvdata->pclk);
75}
76
Stepan Moskovchenko22d32c62012-07-11 18:00:06 -070077static void __sync_tlb(void __iomem *base, int ctx)
78{
79 SET_TLBSYNC(base, ctx, 0);
80
81 /* No barrier needed due to register proximity */
82 while (GET_CB_TLBSTATUS_SACTIVE(base, ctx))
83 cpu_relax();
84
85 /* No barrier needed due to read dependency */
86}
87
Steve Mucklef132c6c2012-06-06 18:30:57 -070088static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
89{
90 struct msm_priv *priv = domain->priv;
91 struct msm_iommu_drvdata *iommu_drvdata;
92 struct msm_iommu_ctx_drvdata *ctx_drvdata;
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -070093 int ret = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -070094 int asid;
95
96 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
97 BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
98
99 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
100 BUG_ON(!iommu_drvdata);
101
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700102
103 ret = __enable_clocks(iommu_drvdata);
104 if (ret)
105 goto fail;
106
Steve Mucklef132c6c2012-06-06 18:30:57 -0700107 asid = GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base,
108 ctx_drvdata->num);
109
110 SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
111 asid | (va & CB_TLBIVA_VA));
112 mb();
Stepan Moskovchenko22d32c62012-07-11 18:00:06 -0700113 __sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700114 __disable_clocks(iommu_drvdata);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700115 }
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700116fail:
117 return ret;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700118}
119
120static int __flush_iotlb(struct iommu_domain *domain)
121{
122 struct msm_priv *priv = domain->priv;
123 struct msm_iommu_drvdata *iommu_drvdata;
124 struct msm_iommu_ctx_drvdata *ctx_drvdata;
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700125 int ret = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700126 int asid;
127
128 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
129 BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
130
131 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
132 BUG_ON(!iommu_drvdata);
133
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700134 ret = __enable_clocks(iommu_drvdata);
135 if (ret)
136 goto fail;
137
Steve Mucklef132c6c2012-06-06 18:30:57 -0700138 asid = GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base,
139 ctx_drvdata->num);
140
141 SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
142 mb();
Stepan Moskovchenko22d32c62012-07-11 18:00:06 -0700143 __sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700144 __disable_clocks(iommu_drvdata);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700145 }
146
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700147fail:
148 return ret;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700149}
150
/*
 * __reset_iommu() - put the global IOMMU state into a known clean state.
 * @base:     IOMMU register base.
 * @smt_size: number of stream-match (SMR) entries to invalidate.
 *
 * Clears the global configuration/fault registers, invalidates all
 * non-secure non-hyp TLB entries, and marks every SMR entry invalid.
 * The final mb() ensures all writes have posted before returning.
 */
static void __reset_iommu(void __iomem *base, int smt_size)
{
	int i;

	SET_ACR(base, 0);
	SET_NSACR(base, 0);
	SET_CR2(base, 0);
	SET_NSCR2(base, 0);
	SET_GFAR(base, 0);
	SET_GFSRRESTORE(base, 0);
	SET_TLBIALLNSNH(base, 0);
	SET_PMCR(base, 0);
	SET_SCR1(base, 0);
	SET_SSDR_N(base, 0, 0);

	/* Invalidate every stream-match register entry */
	for (i = 0; i < smt_size; i++)
		SET_SMR_VALID(base, i, 0);

	mb();
}
171
/*
 * __program_iommu() - global one-time IOMMU setup (done on first attach).
 * @base:     IOMMU register base.
 * @smt_size: number of SMR entries, forwarded to __reset_iommu().
 *
 * Resets the IOMMU, then enables stream-match/unmatched-stream fault
 * reporting, stall-on-fault, global fault interrupts, and finally clears
 * CLIENTPD so that client transactions are translated rather than bypassed.
 */
static void __program_iommu(void __iomem *base, int smt_size)
{
	__reset_iommu(base, smt_size);

	SET_CR0_SMCFCFG(base, 1);
	SET_CR0_USFCFG(base, 1);
	SET_CR0_STALLD(base, 1);
	SET_CR0_GCFGFIE(base, 1);
	SET_CR0_GCFGFRE(base, 1);
	SET_CR0_GFIE(base, 1);
	SET_CR0_GFRE(base, 1);
	SET_CR0_CLIENTPD(base, 0);
	mb(); /* Make sure writes complete before returning */
}
186
/*
 * __reset_context() - zero out one context bank's registers.
 * @base: IOMMU register base.
 * @ctx:  context bank number.
 *
 * Clears the translation, attribute and fault registers for the context
 * and invalidates its TLB entries; mb() makes the writes observable
 * before the caller reprograms the context.
 */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_ACTLR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_TLBIALL(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	mb();
}
202
/*
 * __program_context() - fully configure one context bank for a domain.
 * @base:     IOMMU register base.
 * @ctx:      context bank number to program.
 * @ncb:      total number of context banks (used for ASID reuse search).
 * @pgtable:  physical address of the domain's first-level page table.
 * @redirect: non-zero to mark page-table walks inner-cacheable/shareable.
 * @sids:     stream IDs to route to this context.
 * @len:      size of @sids in bytes (iterated as len / sizeof(*sids)).
 * @smt_size: number of SMR entries available for stream matching.
 *
 * Resets the context, installs TTBR0, enables fault interrupts and TEX
 * remap, programs one SMR/S2CR pair per stream ID, selects an ASID
 * (re-using the ASID of any other context sharing the same page table),
 * and finally enables the MMU for this context.
 */
static void __program_context(void __iomem *base, int ctx, int ncb,
			      phys_addr_t pgtable, int redirect,
			      u32 *sids, int len, int smt_size)
{
	unsigned int prrr, nmrr;
	unsigned int pn;
	int i, j, found, num = 0;

	__reset_context(base, ctx);

	/* Point TTBR0 at the domain's page table */
	pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
	SET_TTBCR(base, ctx, 0);
	SET_CB_TTBR0_ADDR(base, ctx, pn);

	/* Enable context fault interrupt */
	SET_CB_SCTLR_CFIE(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_CB_ACTLR_BPRCISH(base, ctx, 1);
	SET_CB_ACTLR_BPRCOSH(base, ctx, 1);
	SET_CB_ACTLR_BPRCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_CB_SCTLR_TRE(base, ctx, 1);

	/* Enable private ASID namespace */
	SET_CB_SCTLR_ASIDPNE(base, ctx, 1);

	/* Set TEX remap attributes (copied from the CPU's PRRR/NMRR) */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_CB_TTBR0_S(base, ctx, 1);
		SET_CB_TTBR0_NOS(base, ctx, 1);
		SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
		SET_CB_TTBR0_IRGN0(base, ctx, 1);
		SET_CB_TTBR0_RGN(base, ctx, 1);	/* WB, WA */
	}

	/* Program the M2V tables for this context: claim the first free
	 * SMR entry for each stream ID and bind it to this context bank.
	 */
	for (i = 0; i < len / sizeof(*sids); i++) {
		for (; num < smt_size; num++)
			if (GET_SMR_VALID(base, num) == 0)
				break;
		BUG_ON(num >= smt_size);

		SET_SMR_VALID(base, num, 1);
		SET_SMR_MASK(base, num, 0);
		SET_SMR_ID(base, num, sids[i]);

		/* Set VMID = 0 */
		SET_S2CR_N(base, num, 0);
		SET_S2CR_CBNDX(base, num, ctx);
		/* Set security bit override to be Non-secure */
		SET_S2CR_NSCFG(base, num, 3);
	}

	SET_CBAR_N(base, ctx, 0);
	/* Stage 1 Context with Stage 2 bypass */
	SET_CBAR_TYPE(base, ctx, 1);
	/* Route page faults to the non-secure interrupt */
	SET_CBAR_IRPTNDX(base, ctx, 1);

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if ((GET_CB_TTBR0_ADDR(base, i) == pn) && (i != ctx)) {
			SET_CB_CONTEXTIDR_ASID(base, ctx, \
					GET_CB_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CB_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CB_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		/* ncb contexts can always be given ncb distinct ASIDs */
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_CB_SCTLR_M(base, ctx, 1);
	mb();
}
304
305static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
306{
307 struct msm_priv *priv;
308
309 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
310 if (!priv)
311 goto fail_nomem;
312
313#ifdef CONFIG_IOMMU_PGTABLES_L2
314 priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
315#endif
316
317 INIT_LIST_HEAD(&priv->list_attached);
318 if (msm_iommu_pagetable_alloc(&priv->pt))
319 goto fail_nomem;
320
321 domain->priv = priv;
322 return 0;
323
324fail_nomem:
325 kfree(priv);
326 return -ENOMEM;
327}
328
329static void msm_iommu_domain_destroy(struct iommu_domain *domain)
330{
331 struct msm_priv *priv;
332
333 mutex_lock(&msm_iommu_lock);
334 priv = domain->priv;
335 domain->priv = NULL;
336
337 if (priv)
338 msm_iommu_pagetable_free(&priv->pt);
339
340 kfree(priv);
341 mutex_unlock(&msm_iommu_lock);
342}
343
Sathish Ambleycf045e62012-06-07 12:56:50 -0700344static int msm_iommu_ctx_attached(struct device *dev)
345{
346 struct platform_device *pdev;
347 struct device_node *child;
348 struct msm_iommu_ctx_drvdata *ctx;
349
350 for_each_child_of_node(dev->of_node, child) {
351 pdev = of_find_device_by_node(child);
352
353 ctx = dev_get_drvdata(&pdev->dev);
354 if (ctx->attached_domain) {
355 of_node_put(child);
356 return 1;
357 }
358 }
359
360 return 0;
361}
362
/*
 * msm_iommu_attach_dev() - attach a context device to an IOMMU domain.
 * @domain: the domain to attach to.
 * @dev:    the context device; its parent is the IOMMU device.
 *
 * Powers the IOMMU (GDSC regulator + clocks), performs global IOMMU
 * programming on the first attach for this IOMMU, programs the context
 * bank for the domain's page table, and records the attachment.  The
 * regulator is left enabled for the lifetime of the attachment (it is
 * released in msm_iommu_detach_dev()); the clocks are only held around
 * the register programming.
 *
 * Returns 0 on success, -EINVAL for bad arguments, -EBUSY if the
 * context is already attached, or a regulator/clock error.
 */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto fail;
	}

	/* Reject a context that is already attached (to any domain) */
	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = regulator_enable(iommu_drvdata->gdsc);
	if (ret)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret) {
		regulator_disable(iommu_drvdata->gdsc);
		goto fail;
	}

	/* Global programming only needed before the first attachment */
	if (!msm_iommu_ctx_attached(dev->parent))
		__program_iommu(iommu_drvdata->base, iommu_drvdata->nsmr);

	__program_context(iommu_drvdata->base, ctx_drvdata->num,
		iommu_drvdata->ncb, __pa(priv->pt.fl_table),
		priv->pt.redirect, ctx_drvdata->sids, ctx_drvdata->nsid,
		iommu_drvdata->nsmr);
	__disable_clocks(iommu_drvdata);

	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ctx_drvdata->attached_domain = domain;

fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
423
/*
 * msm_iommu_detach_dev() - detach a context device from a domain.
 * @domain: the domain the context is attached to.
 * @dev:    the context device; its parent is the IOMMU device.
 *
 * Invalidates the context's TLB entries (by ASID), resets the context
 * bank, drops the GDSC regulator reference taken at attach time, and
 * removes the attachment bookkeeping.  Silently returns on bad
 * arguments or clock failure (the iommu_ops detach hook returns void).
 */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Flush this context's ASID before resetting the context bank */
	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
		GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));

	__reset_context(iommu_drvdata->base, ctx_drvdata->num);
	__disable_clocks(iommu_drvdata);

	/* Balance the regulator_enable() done in attach */
	regulator_disable(iommu_drvdata->gdsc);

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;

fail:
	mutex_unlock(&msm_iommu_lock);
}
460
461static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
462 phys_addr_t pa, size_t len, int prot)
463{
464 struct msm_priv *priv;
465 int ret = 0;
466
467 mutex_lock(&msm_iommu_lock);
468
469 priv = domain->priv;
470 if (!priv) {
471 ret = -EINVAL;
472 goto fail;
473 }
474
475 ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot);
476 if (ret)
477 goto fail;
478
479 ret = __flush_iotlb_va(domain, va);
480fail:
481 mutex_unlock(&msm_iommu_lock);
482 return ret;
483}
484
485static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
486 size_t len)
487{
488 struct msm_priv *priv;
489 int ret = -ENODEV;
490
491 mutex_lock(&msm_iommu_lock);
492
493 priv = domain->priv;
494 if (!priv)
495 goto fail;
496
497 ret = msm_iommu_pagetable_unmap(&priv->pt, va, len);
498 if (ret < 0)
499 goto fail;
500
501 ret = __flush_iotlb_va(domain, va);
502fail:
503 mutex_unlock(&msm_iommu_lock);
504
505 /* the IOMMU API requires us to return how many bytes were unmapped */
506 len = ret ? 0 : len;
507 return len;
508}
509
510static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
511 struct scatterlist *sg, unsigned int len,
512 int prot)
513{
514 int ret;
515 struct msm_priv *priv;
516
517 mutex_lock(&msm_iommu_lock);
518
519 priv = domain->priv;
520 if (!priv) {
521 ret = -EINVAL;
522 goto fail;
523 }
524
525 ret = msm_iommu_pagetable_map_range(&priv->pt, va, sg, len, prot);
526 if (ret)
527 goto fail;
528
529 __flush_iotlb(domain);
530fail:
531 mutex_unlock(&msm_iommu_lock);
532 return ret;
533}
534
535
536static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
537 unsigned int len)
538{
539 struct msm_priv *priv;
540
541 mutex_lock(&msm_iommu_lock);
542
543 priv = domain->priv;
544 msm_iommu_pagetable_unmap_range(&priv->pt, va, len);
545
546 __flush_iotlb(domain);
547 mutex_unlock(&msm_iommu_lock);
548 return 0;
549}
550
551static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
552 unsigned long va)
553{
554 struct msm_priv *priv;
555 struct msm_iommu_drvdata *iommu_drvdata;
556 struct msm_iommu_ctx_drvdata *ctx_drvdata;
557 unsigned int par;
558 void __iomem *base;
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700559 phys_addr_t ret = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700560 int ctx;
561
562 mutex_lock(&msm_iommu_lock);
563
564 priv = domain->priv;
565 if (list_empty(&priv->list_attached))
566 goto fail;
567
568 ctx_drvdata = list_entry(priv->list_attached.next,
569 struct msm_iommu_ctx_drvdata, attached_elm);
570 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
571
572 base = iommu_drvdata->base;
573 ctx = ctx_drvdata->num;
574
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700575 ret = __enable_clocks(iommu_drvdata);
576 if (ret) {
577 ret = 0; /* 0 indicates translation failed */
578 goto fail;
579 }
580
Steve Mucklef132c6c2012-06-06 18:30:57 -0700581 SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
582 mb();
583 while (GET_CB_ATSR_ACTIVE(base, ctx))
584 cpu_relax();
585
586 par = GET_PAR(base, ctx);
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700587 __disable_clocks(iommu_drvdata);
588
Steve Mucklef132c6c2012-06-06 18:30:57 -0700589 if (par & CB_PAR_F) {
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700590 ret = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700591 } else {
592 /* We are dealing with a supersection */
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700593 if (ret & CB_PAR_SS)
594 ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700595 else /* Upper 20 bits from PAR, lower 12 from VA */
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700596 ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700597 }
598
599fail:
600 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700601 return ret;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700602}
603
/* No IOMMU capabilities are advertised; always reports "not supported". */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}
609
/*
 * print_ctx_regs() - dump a context bank's fault-related registers.
 * @base: IOMMU register base.
 * @ctx:  faulting context bank number.
 * @fsr:  previously-read fault status register value, decoded into the
 *        individual fault-type flags in the log output.
 */
static void print_ctx_regs(void __iomem *base, int ctx, unsigned int fsr)
{
	pr_err("FAR    = %08x    PAR    = %08x\n",
		GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "PF " : "",
			(fsr & 0x10) ? "EF " : "",
			(fsr & 0x20) ? "TLBMCF " : "",
			(fsr & 0x40) ? "TLBLKF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
		 GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
		 GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
		 GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
		 GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
634
/*
 * msm_iommu_fault_handler_v2() - context-bank fault interrupt handler.
 * @irq:    interrupt number (unused).
 * @dev_id: the context platform device that raised the fault.
 *
 * Reads the context's FSR; if a fault is pending, reports it via
 * report_iommu_fault() (falling back to a verbose register dump when no
 * handler consumed it), acknowledges the fault by writing FSR back, and
 * returns IRQ_HANDLED.  Returns IRQ_NONE when no fault was pending or
 * the clocks could not be enabled.
 *
 * NOTE(review): this handler takes a mutex and so cannot run in hard-irq
 * context — presumably it is registered as a threaded IRQ; confirm at
 * the request_irq() site.
 */
irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct msm_iommu_drvdata *drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int fsr;
	int ret;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(!pdev);

	drvdata = dev_get_drvdata(pdev->dev.parent);
	BUG_ON(!drvdata);

	ctx_drvdata = dev_get_drvdata(&pdev->dev);
	BUG_ON(!ctx_drvdata);

	ret = __enable_clocks(drvdata);
	if (ret) {
		ret = IRQ_NONE;
		goto fail;
	}

	fsr = GET_FSR(drvdata->base, ctx_drvdata->num);
	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			/* Let the domain's registered fault handler try first */
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
				&ctx_drvdata->pdev->dev,
				GET_FAR(drvdata->base, ctx_drvdata->num), 0);

		/* -ENOSYS: nobody handled it — dump diagnostics */
		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name    = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name,
							ctx_drvdata->num);
			pr_err("Interesting registers:\n");
			print_ctx_regs(drvdata->base, ctx_drvdata->num, fsr);
		}

		/* Write FSR back to acknowledge/clear the fault */
		SET_FSR(drvdata->base, ctx_drvdata->num, fsr);
		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
688
/* Return the physical address of the domain's first-level page table. */
static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	return __pa(priv->pt.fl_table);
}
694
/* iommu_ops vtable wiring this MSM SMMU-v2 driver into the IOMMU API. */
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
709
/* Initialize page-table bookkeeping and register the ops for the
 * platform bus.  Runs at subsys_initcall time so the IOMMU is available
 * before its client drivers probe.
 */
static int __init msm_iommu_init(void)
{
	msm_iommu_pagetable_init();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);
718
719MODULE_LICENSE("GPL v2");
720MODULE_DESCRIPTION("MSM SMMU v2 Driver");