blob: d495c1bd40310eecae13069511c597a62c2cfab3 [file] [log] [blame]
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -08001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070011 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/interrupt.h>
20#include <linux/list.h>
21#include <linux/spinlock.h>
22#include <linux/slab.h>
23#include <linux/iommu.h>
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080024#include <linux/clk.h>
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -070025#include <linux/scatterlist.h>
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070026
27#include <asm/cacheflush.h>
28#include <asm/sizes.h>
29
30#include <mach/iommu_hw-8xxx.h>
31#include <mach/iommu.h>
32
Stepan Moskovchenko100832c2010-11-15 18:20:08 -080033#define MRC(reg, processor, op1, crn, crm, op2) \
34__asm__ __volatile__ ( \
35" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
36: "=r" (reg))
37
38#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
39#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
40
Stepan Moskovchenko6ee3be82011-11-08 15:24:53 -080041/* Sharability attributes of MSM IOMMU mappings */
42#define MSM_IOMMU_ATTR_NON_SH 0x0
43#define MSM_IOMMU_ATTR_SH 0x4
44
45/* Cacheability attributes of MSM IOMMU mappings */
46#define MSM_IOMMU_ATTR_NONCACHED 0x0
47#define MSM_IOMMU_ATTR_CACHED_WB_WA 0x1
48#define MSM_IOMMU_ATTR_CACHED_WB_NWA 0x2
49#define MSM_IOMMU_ATTR_CACHED_WT 0x3
50
51
/*
 * Flush the data-cache lines covering the PTE range [first, last) so the
 * IOMMU hardware page-table walker sees freshly written entries in memory.
 * Used only when the page tables are not L2-redirected (priv->redirect == 0).
 */
static inline void clean_pte(unsigned long *first, unsigned long *last)
{
	dmac_flush_range(first, last);
}
Stepan Moskovchenko094475d2011-08-03 13:38:29 -070056
Stepan Moskovchenko100832c2010-11-15 18:20:08 -080057static int msm_iommu_tex_class[4];
58
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -080059DEFINE_MUTEX(msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070060
61struct msm_priv {
62 unsigned long *pgtable;
Stepan Moskovchenkob2438892011-08-31 17:16:19 -070063 int redirect;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070064 struct list_head list_attached;
65};
66
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080067static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
68{
69 int ret;
70
Stepan Moskovchenkobbf95e12012-02-14 15:42:27 -080071 ret = clk_prepare_enable(drvdata->pclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080072 if (ret)
73 goto fail;
74
75 if (drvdata->clk) {
Stepan Moskovchenkobbf95e12012-02-14 15:42:27 -080076 ret = clk_prepare_enable(drvdata->clk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080077 if (ret)
Stepan Moskovchenkobbf95e12012-02-14 15:42:27 -080078 clk_disable_unprepare(drvdata->pclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080079 }
80fail:
81 return ret;
82}
83
84static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
85{
86 if (drvdata->clk)
Stepan Moskovchenkobbf95e12012-02-14 15:42:27 -080087 clk_disable_unprepare(drvdata->clk);
88 clk_disable_unprepare(drvdata->pclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080089}
90
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -070091static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
92{
93 struct msm_priv *priv = domain->priv;
94 struct msm_iommu_drvdata *iommu_drvdata;
95 struct msm_iommu_ctx_drvdata *ctx_drvdata;
96 int ret = 0;
97 int asid;
98
99 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
100 if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
101 BUG();
102
103 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
104 if (!iommu_drvdata)
105 BUG();
106
107 ret = __enable_clocks(iommu_drvdata);
108 if (ret)
109 goto fail;
110
111 asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
112 ctx_drvdata->num);
113
114 SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
115 asid | (va & TLBIVA_VA));
116 mb();
117 __disable_clocks(iommu_drvdata);
118 }
119fail:
120 return ret;
121}
122
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700123static int __flush_iotlb(struct iommu_domain *domain)
124{
125 struct msm_priv *priv = domain->priv;
126 struct msm_iommu_drvdata *iommu_drvdata;
127 struct msm_iommu_ctx_drvdata *ctx_drvdata;
128 int ret = 0;
129 int asid;
130
131 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
132 if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
133 BUG();
134
135 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
136 if (!iommu_drvdata)
137 BUG();
138
139 ret = __enable_clocks(iommu_drvdata);
140 if (ret)
141 goto fail;
142
143 asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
144 ctx_drvdata->num);
145
146 SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
147 mb();
148 __disable_clocks(iommu_drvdata);
149 }
150fail:
151 return ret;
152}
153
/*
 * Zero every per-context register of context bank 'ctx' and issue a
 * barrier so the writes are posted before the caller reprograms or
 * abandons the context.  NOTE(review): callers in this file enable the
 * IOMMU clocks around this call — confirm that remains a precondition.
 */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();	/* ensure all register writes are posted */
}
177
/*
 * Program context bank 'ctx' for hardware table walks of the 16K
 * first-level table at physical address 'pgtable', then enable the MMU
 * for that context.
 *
 * @base:       IOMMU register base (iomapped).
 * @ctx:        context bank number to program.
 * @ncb:        total number of context banks on this IOMMU, used when
 *              scanning for an ASID.
 * @pgtable:    physical address of the first-level page table.
 * @redirect:   nonzero if page tables are L2-cacheable; selects
 *              inner/outer-cacheable, shareable TTBR attributes.
 * @ttbr_split: TTBCR split value; nonzero also programs TTBR1.
 *
 * ASID policy: if another context bank already walks the same page table
 * (same TTBR0 PA), its ASID is reused so their TLB entries can be shared;
 * otherwise the first ASID value in [0, ncb) not used by any other
 * context is claimed.  BUG_ON fires if no free ASID exists, which cannot
 * happen while ASIDs are drawn from a pool the same size as the number
 * of context banks.
 *
 * NOTE(review): callers must hold msm_iommu_lock and have enabled the
 * IOMMU clocks (the attach path in this file does both) — confirm for
 * any new caller.
 */
static void __program_context(void __iomem *base, int ctx, int ncb,
			      phys_addr_t pgtable, int redirect,
			      int ttbr_split)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, ttbr_split);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));
	if (ttbr_split)
		SET_TTBR1_PA(base, ctx, (pgtable >> TTBR1_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes: copy PRRR/NMRR from the CPU so the
	 * IOMMU interprets memory-type bits the same way the kernel does.
	 */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx, \
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		BUG_ON(found);	/* no free ASID: should be impossible */
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();	/* ensure all register writes are posted */
}
274
Stepan Moskovchenkoff2d3662011-08-31 17:13:32 -0700275static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700276{
277 struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
278
279 if (!priv)
280 goto fail_nomem;
281
282 INIT_LIST_HEAD(&priv->list_attached);
283 priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
284 get_order(SZ_16K));
285
286 if (!priv->pgtable)
287 goto fail_nomem;
288
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700289#ifdef CONFIG_IOMMU_PGTABLES_L2
290 priv->redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
291#endif
292
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700293 memset(priv->pgtable, 0, SZ_16K);
294 domain->priv = priv;
295 return 0;
296
297fail_nomem:
298 kfree(priv);
299 return -ENOMEM;
300}
301
302static void msm_iommu_domain_destroy(struct iommu_domain *domain)
303{
304 struct msm_priv *priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700305 unsigned long *fl_table;
306 int i;
307
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800308 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700309 priv = domain->priv;
310 domain->priv = NULL;
311
312 if (priv) {
313 fl_table = priv->pgtable;
314
315 for (i = 0; i < NUM_FL_PTE; i++)
316 if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
317 free_page((unsigned long) __va(((fl_table[i]) &
318 FL_BASE_MASK)));
319
320 free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
321 priv->pgtable = NULL;
322 }
323
324 kfree(priv);
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800325 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700326}
327
/*
 * Attach a context-bank device to a domain: validate the device and its
 * driver data, reject double attachment, then program the context bank
 * to walk this domain's page table and record it on the attached list.
 *
 * Returns 0 on success, -EINVAL for missing/invalid device data,
 * -EBUSY if the context is already attached (to this or any domain),
 * or a negative errno if the IOMMU clocks could not be enabled.
 */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* Parent platform device carries the IOMMU-wide driver data;
	 * the context device itself carries the per-context data. */
	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* Non-empty list node => context is already attached somewhere */
	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	/* Defensive: also reject if it is already on THIS domain's list */
	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Clocks must stay enabled while registers are programmed */
	__program_context(iommu_drvdata->base, ctx_dev->num, iommu_drvdata->ncb,
			  __pa(priv->pgtable), priv->redirect,
			  iommu_drvdata->ttbr_split);

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);

fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
381
/*
 * Detach a context-bank device from a domain: invalidate its TLB entries
 * by ASID, reset the context bank registers, and remove it from the
 * domain's attached list.  Returns void per the iommu_ops contract, so
 * invalid arguments and clock failures are silently ignored (the context
 * is then left attached).
 */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Flush this context's TLB entries before tearing it down */
	SET_TLBIASID(iommu_drvdata->base, ctx_dev->num,
		     GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_dev->num));

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	mutex_unlock(&msm_iommu_lock);
}
418
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700419static int __get_pgprot(int prot, int len)
420{
421 unsigned int pgprot;
Stepan Moskovchenko6ee3be82011-11-08 15:24:53 -0800422 int tex;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700423
Stepan Moskovchenko6ee3be82011-11-08 15:24:53 -0800424 if (prot & IOMMU_CACHE)
425 tex = (pgprot_kernel >> 2) & 0x07;
426 else
427 tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700428
429 if (tex < 0 || tex > NUM_TEX_CLASS - 1)
430 return 0;
431
432 if (len == SZ_16M || len == SZ_1M) {
Stepan Moskovchenko6ee3be82011-11-08 15:24:53 -0800433 pgprot = FL_SHARED;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700434 pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
435 pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
436 pgprot |= tex & 0x04 ? FL_TEX0 : 0;
437 } else {
Stepan Moskovchenko6ee3be82011-11-08 15:24:53 -0800438 pgprot = SL_SHARED;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700439 pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
440 pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
441 pgprot |= tex & 0x04 ? SL_TEX0 : 0;
442 }
443
444 return pgprot;
445}
446
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700447static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
448 phys_addr_t pa, int order, int prot)
449{
450 struct msm_priv *priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700451 unsigned long *fl_table;
452 unsigned long *fl_pte;
453 unsigned long fl_offset;
454 unsigned long *sl_table;
455 unsigned long *sl_pte;
456 unsigned long sl_offset;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800457 unsigned int pgprot;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700458 size_t len = 0x1000UL << order;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700459 int ret = 0;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700460
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800461 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700462
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800463 priv = domain->priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700464 if (!priv) {
465 ret = -EINVAL;
466 goto fail;
467 }
468
469 fl_table = priv->pgtable;
470
471 if (len != SZ_16M && len != SZ_1M &&
472 len != SZ_64K && len != SZ_4K) {
473 pr_debug("Bad size: %d\n", len);
474 ret = -EINVAL;
475 goto fail;
476 }
477
478 if (!fl_table) {
479 pr_debug("Null page table\n");
480 ret = -EINVAL;
481 goto fail;
482 }
483
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700484 pgprot = __get_pgprot(prot, len);
485
486 if (!pgprot) {
487 ret = -EINVAL;
488 goto fail;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800489 }
490
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700491 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
492 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
493
494 if (len == SZ_16M) {
495 int i = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700496
497 for (i = 0; i < 16; i++)
498 if (*(fl_pte+i)) {
499 ret = -EBUSY;
500 goto fail;
501 }
502
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700503 for (i = 0; i < 16; i++)
504 *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
505 FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800506 FL_SHARED | FL_NG | pgprot;
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700507 if (!priv->redirect)
508 clean_pte(fl_pte, fl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700509 }
510
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700511 if (len == SZ_1M) {
512 if (*fl_pte) {
513 ret = -EBUSY;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700514 goto fail;
515 }
516
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700517 *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
518 FL_TYPE_SECT | FL_SHARED | pgprot;
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700519 if (!priv->redirect)
520 clean_pte(fl_pte, fl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700521 }
522
523 /* Need a 2nd level table */
524 if (len == SZ_4K || len == SZ_64K) {
525
526 if (*fl_pte == 0) {
527 unsigned long *sl;
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800528 sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700529 get_order(SZ_4K));
530
531 if (!sl) {
532 pr_debug("Could not allocate second level table\n");
533 ret = -ENOMEM;
534 goto fail;
535 }
536 memset(sl, 0, SZ_4K);
537
538 *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
539 FL_TYPE_TABLE);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700540
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700541 if (!priv->redirect)
542 clean_pte(fl_pte, fl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700543 }
544
545 if (!(*fl_pte & FL_TYPE_TABLE)) {
546 ret = -EBUSY;
547 goto fail;
548 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700549 }
550
551 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
552 sl_offset = SL_OFFSET(va);
553 sl_pte = sl_table + sl_offset;
554
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700555 if (len == SZ_4K) {
556 if (*sl_pte) {
557 ret = -EBUSY;
558 goto fail;
559 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700560
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800561 *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800562 SL_SHARED | SL_TYPE_SMALL | pgprot;
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700563 if (!priv->redirect)
564 clean_pte(sl_pte, sl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700565 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700566
567 if (len == SZ_64K) {
568 int i;
569
570 for (i = 0; i < 16; i++)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700571 if (*(sl_pte+i)) {
572 ret = -EBUSY;
573 goto fail;
574 }
575
576 for (i = 0; i < 16; i++)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700577 *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800578 SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700579
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700580 if (!priv->redirect)
581 clean_pte(sl_pte, sl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700582 }
583
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -0700584 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700585fail:
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800586 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700587 return ret;
588}
589
590static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
591 int order)
592{
593 struct msm_priv *priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700594 unsigned long *fl_table;
595 unsigned long *fl_pte;
596 unsigned long fl_offset;
597 unsigned long *sl_table;
598 unsigned long *sl_pte;
599 unsigned long sl_offset;
600 size_t len = 0x1000UL << order;
601 int i, ret = 0;
602
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800603 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700604
605 priv = domain->priv;
606
607 if (!priv) {
608 ret = -ENODEV;
609 goto fail;
610 }
611
612 fl_table = priv->pgtable;
613
614 if (len != SZ_16M && len != SZ_1M &&
615 len != SZ_64K && len != SZ_4K) {
616 pr_debug("Bad length: %d\n", len);
617 ret = -EINVAL;
618 goto fail;
619 }
620
621 if (!fl_table) {
622 pr_debug("Null page table\n");
623 ret = -EINVAL;
624 goto fail;
625 }
626
627 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
628 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
629
630 if (*fl_pte == 0) {
631 pr_debug("First level PTE is 0\n");
632 ret = -ENODEV;
633 goto fail;
634 }
635
636 /* Unmap supersection */
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700637 if (len == SZ_16M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700638 for (i = 0; i < 16; i++)
639 *(fl_pte+i) = 0;
640
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700641 if (!priv->redirect)
642 clean_pte(fl_pte, fl_pte + 16);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700643 }
644
645 if (len == SZ_1M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700646 *fl_pte = 0;
647
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700648 if (!priv->redirect)
649 clean_pte(fl_pte, fl_pte + 1);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700650 }
651
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700652 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
653 sl_offset = SL_OFFSET(va);
654 sl_pte = sl_table + sl_offset;
655
656 if (len == SZ_64K) {
657 for (i = 0; i < 16; i++)
658 *(sl_pte+i) = 0;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700659
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700660 if (!priv->redirect)
661 clean_pte(sl_pte, sl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700662 }
663
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700664 if (len == SZ_4K) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700665 *sl_pte = 0;
666
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700667 if (!priv->redirect)
668 clean_pte(sl_pte, sl_pte + 1);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700669 }
670
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700671 if (len == SZ_4K || len == SZ_64K) {
672 int used = 0;
673
674 for (i = 0; i < NUM_SL_PTE; i++)
675 if (sl_table[i])
676 used = 1;
677 if (!used) {
678 free_page((unsigned long)sl_table);
679 *fl_pte = 0;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700680
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700681 if (!priv->redirect)
682 clean_pte(fl_pte, fl_pte + 1);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700683 }
684 }
685
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -0700686 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700687fail:
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800688 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700689 return ret;
690}
691
/*
 * Resolve a scatterlist entry to a physical address.  sg_dma_address()
 * is preferred so carveout regions without a backing struct page can be
 * mapped; fall back to sg_phys() when no DMA address was set.
 * Returns 0 if neither yields an address.
 */
static unsigned int get_phys_addr(struct scatterlist *sg)
{
	unsigned int pa = sg_dma_address(sg);

	return pa ? pa : (unsigned int) sg_phys(sg);
}
704
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700705static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
706 struct scatterlist *sg, unsigned int len,
707 int prot)
708{
709 unsigned int pa;
710 unsigned int offset = 0;
711 unsigned int pgprot;
712 unsigned long *fl_table;
713 unsigned long *fl_pte;
714 unsigned long fl_offset;
715 unsigned long *sl_table;
716 unsigned long sl_offset, sl_start;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700717 unsigned int chunk_offset = 0;
718 unsigned int chunk_pa;
719 int ret = 0;
720 struct msm_priv *priv;
721
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800722 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700723
724 BUG_ON(len & (SZ_4K - 1));
725
726 priv = domain->priv;
727 fl_table = priv->pgtable;
728
729 pgprot = __get_pgprot(prot, SZ_4K);
730
731 if (!pgprot) {
732 ret = -EINVAL;
733 goto fail;
734 }
735
736 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
737 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
738
739 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
740 sl_offset = SL_OFFSET(va);
741
Jeremy Gebben74e57d42012-03-23 10:26:11 -0600742 chunk_pa = get_phys_addr(sg);
743 if (chunk_pa == 0) {
744 pr_debug("No dma address for sg %p\n", sg);
745 ret = -EINVAL;
746 goto fail;
747 }
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700748
749 while (offset < len) {
750 /* Set up a 2nd level page table if one doesn't exist */
751 if (*fl_pte == 0) {
752 sl_table = (unsigned long *)
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800753 __get_free_pages(GFP_KERNEL, get_order(SZ_4K));
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700754
755 if (!sl_table) {
756 pr_debug("Could not allocate second level table\n");
757 ret = -ENOMEM;
758 goto fail;
759 }
760
761 memset(sl_table, 0, SZ_4K);
762 *fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
763 FL_TYPE_TABLE);
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700764 if (!priv->redirect)
765 clean_pte(fl_pte, fl_pte + 1);
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700766 } else
767 sl_table = (unsigned long *)
768 __va(((*fl_pte) & FL_BASE_MASK));
769
770 /* Keep track of initial position so we
771 * don't clean more than we have to
772 */
773 sl_start = sl_offset;
774
775 /* Build the 2nd level page table */
776 while (offset < len && sl_offset < NUM_SL_PTE) {
777 pa = chunk_pa + chunk_offset;
778 sl_table[sl_offset] = (pa & SL_BASE_MASK_SMALL) |
779 pgprot | SL_AP0 | SL_AP1 | SL_NG |
780 SL_SHARED | SL_TYPE_SMALL;
781 sl_offset++;
782 offset += SZ_4K;
783
784 chunk_offset += SZ_4K;
785
786 if (chunk_offset >= sg->length && offset < len) {
787 chunk_offset = 0;
788 sg = sg_next(sg);
Jeremy Gebben74e57d42012-03-23 10:26:11 -0600789 chunk_pa = get_phys_addr(sg);
790 if (chunk_pa == 0) {
791 pr_debug("No dma address for sg %p\n",
792 sg);
793 ret = -EINVAL;
794 goto fail;
795 }
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700796 }
797 }
798
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700799 if (!priv->redirect)
800 clean_pte(sl_table + sl_start, sl_table + sl_offset);
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700801
802 fl_pte++;
803 sl_offset = 0;
804 }
805 __flush_iotlb(domain);
806fail:
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800807 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700808 return ret;
809}
810
811
/*
 * Unmap 'len' bytes of 4K mappings starting at IOVA 'va', walking one
 * first-level entry (1M of VA) per outer-loop iteration.  Second-level
 * tables that become empty are freed.  'len' must be 4K-aligned (BUG
 * otherwise).  Always returns 0.
 *
 * NOTE(review): unlike msm_iommu_map(), domain->priv and *fl_pte are
 * dereferenced without NULL/zero checks — presumably callers only unmap
 * ranges they previously mapped; confirm before relying on this with
 * untrusted ranges.
 */
static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_start = SL_OFFSET(va);

	while (offset < len) {
		sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
		/* Clamp this pass to the end of the current 2nd-level table */
		sl_end = ((len - offset) / SZ_4K) + sl_start;

		if (sl_end > NUM_SL_PTE)
			sl_end = NUM_SL_PTE;

		memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
		if (!priv->redirect)
			clean_pte(sl_table + sl_start, sl_table + sl_end);

		offset += (sl_end - sl_start) * SZ_4K;

		/* Unmap and free the 2nd level table if all mappings in it
		 * were removed. This saves memory, but the table will need
		 * to be re-allocated the next time someone tries to map these
		 * VAs.
		 */
		used = 0;

		/* If we just unmapped the whole table, don't bother
		 * seeing if there are still used entries left.
		 */
		if (sl_end - sl_start != NUM_SL_PTE)
			for (i = 0; i < NUM_SL_PTE; i++)
				if (sl_table[i]) {
					used = 1;
					break;
				}
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			if (!priv->redirect)
				clean_pte(fl_pte, fl_pte + 1);
		}

		sl_start = 0;
		fl_pte++;
	}

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}
881
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700882static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
883 unsigned long va)
884{
885 struct msm_priv *priv;
886 struct msm_iommu_drvdata *iommu_drvdata;
887 struct msm_iommu_ctx_drvdata *ctx_drvdata;
888 unsigned int par;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700889 void __iomem *base;
890 phys_addr_t ret = 0;
891 int ctx;
892
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800893 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700894
895 priv = domain->priv;
896 if (list_empty(&priv->list_attached))
897 goto fail;
898
899 ctx_drvdata = list_entry(priv->list_attached.next,
900 struct msm_iommu_ctx_drvdata, attached_elm);
901 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
902
903 base = iommu_drvdata->base;
904 ctx = ctx_drvdata->num;
905
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800906 ret = __enable_clocks(iommu_drvdata);
907 if (ret)
908 goto fail;
909
Stepan Moskovchenkob0e78082011-02-28 16:04:55 -0800910 SET_V2PPR(base, ctx, va & V2Pxx_VA);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700911
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700912 mb();
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700913 par = GET_PAR(base, ctx);
914
915 /* We are dealing with a supersection */
916 if (GET_NOFAULT_SS(base, ctx))
917 ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
918 else /* Upper 20 bits from PAR, lower 12 from VA */
919 ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
920
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800921 if (GET_FAULT(base, ctx))
922 ret = 0;
923
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800924 __disable_clocks(iommu_drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700925fail:
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800926 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700927 return ret;
928}
929
/*
 * Report whether the domain supports an optional IOMMU capability.
 * This driver advertises none, so every query is answered with 0.
 */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	/* No optional capabilities implemented */
	return 0;
}
935
/*
 * Dump the fault-diagnosis registers of context bank @ctx to the kernel
 * log.  Called from msm_iommu_fault_handler() with the IOMMU clocks
 * already enabled, so the register reads are safe here.
 */
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR = %08x PAR = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	/* Decode the individual FSR status bits into fault abbreviations */
	pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	/* Fault syndrome, translation table base and control registers */
	pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0 = %08x TTBR1 = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR = %08x ACTLR = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR = %08x NMRR = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
962
963irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
964{
965 struct msm_iommu_drvdata *drvdata = dev_id;
966 void __iomem *base;
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800967 unsigned int fsr;
Stepan Moskovchenkoa43d8c12011-02-24 18:00:42 -0800968 int i, ret;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700969
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800970 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700971
972 if (!drvdata) {
973 pr_err("Invalid device ID in context interrupt handler\n");
974 goto fail;
975 }
976
977 base = drvdata->base;
978
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700979 pr_err("Unexpected IOMMU page fault!\n");
980 pr_err("base = %08x\n", (unsigned int) base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700981 pr_err("name = %s\n", drvdata->name);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700982
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800983 ret = __enable_clocks(drvdata);
984 if (ret)
985 goto fail;
986
Stepan Moskovchenkoa43d8c12011-02-24 18:00:42 -0800987 for (i = 0; i < drvdata->ncb; i++) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700988 fsr = GET_FSR(base, i);
989 if (fsr) {
990 pr_err("Fault occurred in context %d.\n", i);
991 pr_err("Interesting registers:\n");
992 print_ctx_regs(base, i);
993 SET_FSR(base, i, 0x4000000F);
994 }
995 }
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800996 __disable_clocks(drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700997fail:
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800998 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700999 return 0;
1000}
1001
Shubhraprakash Das4c436f22011-12-02 18:01:57 -07001002static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
1003{
1004 struct msm_priv *priv = domain->priv;
1005 return __pa(priv->pgtable);
1006}
1007
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001008static struct iommu_ops msm_iommu_ops = {
1009 .domain_init = msm_iommu_domain_init,
1010 .domain_destroy = msm_iommu_domain_destroy,
1011 .attach_dev = msm_iommu_attach_dev,
1012 .detach_dev = msm_iommu_detach_dev,
1013 .map = msm_iommu_map,
1014 .unmap = msm_iommu_unmap,
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -07001015 .map_range = msm_iommu_map_range,
1016 .unmap_range = msm_iommu_unmap_range,
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001017 .iova_to_phys = msm_iommu_iova_to_phys,
Shubhraprakash Das4c436f22011-12-02 18:01:57 -07001018 .domain_has_cap = msm_iommu_domain_has_cap,
1019 .get_pt_base_addr = msm_iommu_get_pt_base_addr
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001020};
1021
Stepan Moskovchenko100832c2010-11-15 18:20:08 -08001022static int __init get_tex_class(int icp, int ocp, int mt, int nos)
1023{
1024 int i = 0;
1025 unsigned int prrr = 0;
1026 unsigned int nmrr = 0;
1027 int c_icp, c_ocp, c_mt, c_nos;
1028
1029 RCP15_PRRR(prrr);
1030 RCP15_NMRR(nmrr);
1031
1032 for (i = 0; i < NUM_TEX_CLASS; i++) {
1033 c_nos = PRRR_NOS(prrr, i);
1034 c_mt = PRRR_MT(prrr, i);
1035 c_icp = NMRR_ICP(nmrr, i);
1036 c_ocp = NMRR_OCP(nmrr, i);
1037
1038 if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
1039 return i;
1040 }
1041
1042 return -ENODEV;
1043}
1044
1045static void __init setup_iommu_tex_classes(void)
1046{
1047 msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
1048 get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
1049
1050 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
1051 get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
1052
1053 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
1054 get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
1055
1056 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
1057 get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
1058}
1059
Stepan Moskovchenko516cbc72010-11-12 19:29:53 -08001060static int __init msm_iommu_init(void)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001061{
Stepan Moskovchenko15f209c2011-10-31 15:32:44 -07001062 if (!msm_soc_version_supports_iommu())
1063 return -ENODEV;
1064
Stepan Moskovchenko100832c2010-11-15 18:20:08 -08001065 setup_iommu_tex_classes();
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001066 register_iommu(&msm_iommu_ops);
1067 return 0;
1068}
1069
/* subsys_initcall: run before ordinary device initcalls so the IOMMU is
 * available to client drivers that probe later.
 */
subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");