/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include <mach/iommu_hw-8xxx.h>
#include <mach/iommu.h>

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)

/* Shareability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH		0x0
#define MSM_IOMMU_ATTR_SH		0x4

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED	0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA	0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA	0x2
#define MSM_IOMMU_ATTR_CACHED_WT	0x3


static inline void clean_pte(unsigned long *start, unsigned long *end)
{
	dmac_flush_range(start, end);
}

static int msm_iommu_tex_class[4];

DEFINE_MUTEX(msm_iommu_lock);

struct msm_priv {
	unsigned long *pgtable;
	int redirect;
	struct list_head list_attached;
};

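/*
 * Enable the IOMMU pclk and, if present, the optional core clock
 * (drvdata->clk).  If enabling the core clock fails, pclk is disabled
 * again so no clocks are left running on the error path.
 */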
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		goto fail;

	if (drvdata->clk) {
		ret = clk_prepare_enable(drvdata->clk);
		if (ret)
			clk_disable_unprepare(drvdata->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->clk)
		clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}

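/*
 * Invalidate the TLB entry for a single virtual address in every context
 * currently attached to this domain.  The invalidation is tagged with the
 * ASID programmed into each context, and the clocks are enabled around
 * the register accesses.
 */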
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   asid | (va & TLBIVA_VA));
		mb();
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

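/*
 * Invalidate all TLB entries for this domain by issuing an
 * invalidate-by-ASID (TLBIASID) on every attached context.
 */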
static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
		mb();
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

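/*
 * Return a context bank to a known state: clear its barrier and prefetch
 * configuration, control registers, fault state, translation table
 * registers and TEX remap registers.
 */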
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();
}

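/*
 * Program a context bank for a new page table: enable hardware table
 * walks, set up TTBR0/TTBR1 according to ttbr_split, enable context
 * fault reporting, TEX remap and BFB prefetch, optionally mark the page
 * tables as inner/outer cacheable, reuse an existing ASID if another
 * context already points at the same page table (otherwise pick an
 * unused one), and finally turn the MMU on.
 */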
static void __program_context(void __iomem *base, int ctx, int ncb,
			      phys_addr_t pgtable, int redirect,
			      int ttbr_split)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, ttbr_split);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));
	if (ttbr_split)
		SET_TTBR1_PA(base, ctx, (pgtable >> TTBR1_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx,
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();
}

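/*
 * Allocate the per-domain private data and a 16 KB first-level page
 * table.  When CONFIG_IOMMU_PGTABLES_L2 is enabled, record whether the
 * caller asked for cacheable page tables (priv->redirect).
 */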
static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
	priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
							  get_order(SZ_16K));

	if (!priv->pgtable)
		goto fail_nomem;

#ifdef CONFIG_IOMMU_PGTABLES_L2
	priv->redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
#endif

	memset(priv->pgtable, 0, SZ_16K);
	domain->priv = priv;
	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long *fl_table;
	int i;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}

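/*
 * Attach a context-bank device to a domain: look up the IOMMU and
 * context driver data from the device hierarchy, reject double
 * attachments, then program the context with this domain's page table
 * while the IOMMU clocks are enabled.
 */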
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_dev->num, iommu_drvdata->ncb,
			  __pa(priv->pgtable), priv->redirect,
			  iommu_drvdata->ttbr_split);

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);

fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	SET_TLBIASID(iommu_drvdata->base, ctx_dev->num,
		     GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_dev->num));

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	mutex_unlock(&msm_iommu_lock);
}

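/*
 * Translate IOMMU_READ/WRITE/CACHE protection flags into page table
 * attribute bits.  First-level (1 MB / 16 MB section) and second-level
 * (4 KB / 64 KB page) entries use different bit layouts, so the mapping
 * length selects which set of bits is returned.  Returns 0 if the TEX
 * class is out of range.
 */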
static int __get_pgprot(int prot, int len)
{
	unsigned int pgprot;
	int tex;

	if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
		prot |= IOMMU_READ | IOMMU_WRITE;
		WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
	}

	if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
		prot |= IOMMU_READ;
		WARN_ONCE(1, "Write-only iommu mappings unsupported; falling back to RW\n");
	}

	if (prot & IOMMU_CACHE)
		tex = (pgprot_kernel >> 2) & 0x07;
	else
		tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1)
		return 0;

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = FL_SHARED;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
		pgprot |= FL_AP0 | FL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
	} else {
		pgprot = SL_SHARED;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
		pgprot |= SL_AP0 | SL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
	}

	return pgprot;
}

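/*
 * Map a single physically contiguous region of 4 KB, 64 KB, 1 MB or
 * 16 MB (2^order pages) at the given IOVA.  16 MB and 1 MB mappings use
 * first-level (super)section entries; 4 KB and 64 KB mappings go through
 * a second-level table, allocated on demand.  Page table entries are
 * cache-cleaned unless the domain uses cacheable page tables
 * (priv->redirect), and the affected TLB entry is invalidated at the end.
 */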
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, int order, int prot)
{
	struct msm_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	size_t len = 0x1000UL << order;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	pgprot = __get_pgprot(prot, len);

	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		int i = 0;

		for (i = 0; i < 16; i++)
			if (*(fl_pte+i)) {
				ret = -EBUSY;
				goto fail;
			}

		for (i = 0; i < 16; i++)
			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
				  | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
		if (!priv->redirect)
			clean_pte(fl_pte, fl_pte + 16);
	}

	if (len == SZ_1M) {
		if (*fl_pte) {
			ret = -EBUSY;
			goto fail;
		}

		*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
					    | pgprot;
		if (!priv->redirect)
			clean_pte(fl_pte, fl_pte + 1);
	}

	/* Need a 2nd level table */
	if (len == SZ_4K || len == SZ_64K) {

		if (*fl_pte == 0) {
			unsigned long *sl;
			sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
							get_order(SZ_4K));

			if (!sl) {
				pr_debug("Could not allocate second level table\n");
				ret = -ENOMEM;
				goto fail;
			}
			memset(sl, 0, SZ_4K);

			*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) |
				   FL_TYPE_TABLE);

			if (!priv->redirect)
				clean_pte(fl_pte, fl_pte + 1);
		}

		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K) {
		if (*sl_pte) {
			ret = -EBUSY;
			goto fail;
		}

		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
					| SL_TYPE_SMALL | pgprot;
		if (!priv->redirect)
			clean_pte(sl_pte, sl_pte + 1);
	}

	if (len == SZ_64K) {
		int i;

		for (i = 0; i < 16; i++)
			if (*(sl_pte+i)) {
				ret = -EBUSY;
				goto fail;
			}

		for (i = 0; i < 16; i++)
			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
					| SL_SHARED | SL_TYPE_LARGE | pgprot;

		if (!priv->redirect)
			clean_pte(sl_pte, sl_pte + 16);
	}

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

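/*
 * Tear down a mapping previously created by msm_iommu_map().  The length
 * (2^order pages) selects whether first-level or second-level entries
 * are cleared; an emptied second-level table is freed and its first-level
 * entry cleared.  Finishes with a TLB invalidation for the address.
 */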
static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			    int order)
{
	struct msm_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	size_t len = 0x1000UL << order;
	int i, ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv) {
		ret = -ENODEV;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		ret = -ENODEV;
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M) {
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

		if (!priv->redirect)
			clean_pte(fl_pte, fl_pte + 16);
	}

	if (len == SZ_1M) {
		*fl_pte = 0;

		if (!priv->redirect)
			clean_pte(fl_pte, fl_pte + 1);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;

		if (!priv->redirect)
			clean_pte(sl_pte, sl_pte + 16);
	}

	if (len == SZ_4K) {
		*sl_pte = 0;

		if (!priv->redirect)
			clean_pte(sl_pte, sl_pte + 1);
	}

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			if (!priv->redirect)
				clean_pte(fl_pte, fl_pte + 1);
		}
	}

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static unsigned int get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	unsigned int pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

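/*
 * Map a scatterlist into the domain as 4 KB pages starting at va.  The
 * range is walked one second-level table at a time; tables are allocated
 * on demand, filled from successive scatterlist chunks, and cleaned to
 * memory per table rather than per entry.  len must be 4 KB aligned.
 */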
static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	unsigned int pa;
	unsigned int offset = 0;
	unsigned int pgprot;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_offset, sl_start;
	unsigned int chunk_offset = 0;
	unsigned int chunk_pa;
	int ret = 0;
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	pgprot = __get_pgprot(prot, SZ_4K);

	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);

	chunk_pa = get_phys_addr(sg);
	if (chunk_pa == 0) {
		pr_debug("No dma address for sg %p\n", sg);
		ret = -EINVAL;
		goto fail;
	}

	while (offset < len) {
		/* Set up a 2nd level page table if one doesn't exist */
		if (*fl_pte == 0) {
			sl_table = (unsigned long *)
				__get_free_pages(GFP_KERNEL, get_order(SZ_4K));

			if (!sl_table) {
				pr_debug("Could not allocate second level table\n");
				ret = -ENOMEM;
				goto fail;
			}

			memset(sl_table, 0, SZ_4K);
			*fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
							    FL_TYPE_TABLE);
			if (!priv->redirect)
				clean_pte(fl_pte, fl_pte + 1);
		} else
			sl_table = (unsigned long *)
					       __va(((*fl_pte) & FL_BASE_MASK));

		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {
			pa = chunk_pa + chunk_offset;
			sl_table[sl_offset] = (pa & SL_BASE_MASK_SMALL) |
			      pgprot | SL_NG | SL_SHARED | SL_TYPE_SMALL;
			sl_offset++;
			offset += SZ_4K;

			chunk_offset += SZ_4K;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				chunk_pa = get_phys_addr(sg);
				if (chunk_pa == 0) {
					pr_debug("No dma address for sg %p\n",
						 sg);
					ret = -EINVAL;
					goto fail;
				}
			}
		}

		if (!priv->redirect)
			clean_pte(sl_table + sl_start, sl_table + sl_offset);

		fl_pte++;
		sl_offset = 0;
	}
	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}


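/*
 * Unmap a previously mapped range of 4 KB pages.  Second-level entries
 * are cleared one table at a time; a table whose entries are all zero
 * afterwards is freed and its first-level entry cleared.  len must be
 * 4 KB aligned.
 */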
static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_start = SL_OFFSET(va);

	while (offset < len) {
		sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
		sl_end = ((len - offset) / SZ_4K) + sl_start;

		if (sl_end > NUM_SL_PTE)
			sl_end = NUM_SL_PTE;

		memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
		if (!priv->redirect)
			clean_pte(sl_table + sl_start, sl_table + sl_end);

		offset += (sl_end - sl_start) * SZ_4K;

		/* Unmap and free the 2nd level table if all mappings in it
		 * were removed. This saves memory, but the table will need
		 * to be re-allocated the next time someone tries to map these
		 * VAs.
		 */
		used = 0;

		/* If we just unmapped the whole table, don't bother
		 * seeing if there are still used entries left.
		 */
		if (sl_end - sl_start != NUM_SL_PTE)
			for (i = 0; i < NUM_SL_PTE; i++)
				if (sl_table[i]) {
					used = 1;
					break;
				}
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			if (!priv->redirect)
				clean_pte(fl_pte, fl_pte + 1);
		}

		sl_start = 0;
		fl_pte++;
	}

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}

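/*
 * Resolve an IOVA to a physical address using the hardware: a V2P
 * (virtual-to-physical) probe is issued on the first attached context
 * and the result is read back from the PAR register.  Returns 0 if no
 * context is attached or the translation faults.
 */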
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	mb();
	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(base, ctx))
		ret = 0;

	__disable_clocks(iommu_drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

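/*
 * Context fault interrupt handler: walk every context bank on the
 * faulting IOMMU, dump the fault registers for any context that reports
 * a fault and clear its fault status.
 */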
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_drvdata *drvdata = dev_id;
	void __iomem *base;
	unsigned int fsr;
	int i, ret;

	mutex_lock(&msm_iommu_lock);

	if (!drvdata) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	base = drvdata->base;

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int) base);
	pr_err("name = %s\n", drvdata->name);

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	for (i = 0; i < drvdata->ncb; i++) {
		fsr = GET_FSR(base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, i);
			SET_FSR(base, i, 0x4000000F);
		}
	}
	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return 0;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	return __pa(priv->pgtable);
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr
};

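/*
 * Search the CPU's PRRR/NMRR TEX remap registers for a class that
 * matches the requested inner/outer cache policy, memory type and
 * shareability setting; returns the class index or -ENODEV.
 */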
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
	int i = 0;
	unsigned int prrr = 0;
	unsigned int nmrr = 0;
	int c_icp, c_ocp, c_mt, c_nos;

	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);

	for (i = 0; i < NUM_TEX_CLASS; i++) {
		c_nos = PRRR_NOS(prrr, i);
		c_mt = PRRR_MT(prrr, i);
		c_icp = NMRR_ICP(nmrr, i);
		c_ocp = NMRR_OCP(nmrr, i);

		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
			return i;
	}

	return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

static int __init msm_iommu_init(void)
{
	if (!msm_soc_version_supports_iommu())
		return -ENODEV;

	setup_iommu_tex_classes();
	register_iommu(&msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");