/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include <mach/iommu_hw-8xxx.h>
#include <mach/iommu.h>

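/* Helpers to read the CPU's TEX remap registers (PRRR and NMRR); the IOMMU
 * contexts are programmed with the same remap values so that mappings use
 * the same memory attribute encodings as the CPU.
 */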
#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"	mrc	" #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)

/* Shareability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH		0x0
#define MSM_IOMMU_ATTR_SH		0x4

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED	0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA	0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA	0x2
#define MSM_IOMMU_ATTR_CACHED_WT	0x3


static inline void clean_pte(unsigned long *start, unsigned long *end)
{
	dmac_flush_range(start, end);
}

static int msm_iommu_tex_class[4];

DEFINE_MUTEX(msm_iommu_lock);

struct msm_priv {
	unsigned long *pgtable;
	int redirect;
	struct list_head list_attached;
};

static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		goto fail;

	if (drvdata->clk) {
		ret = clk_prepare_enable(drvdata->clk);
		if (ret)
			clk_disable_unprepare(drvdata->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->clk)
		clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}

static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   asid | (va & TLBIVA_VA));
		mb();
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
		mb();
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();
}

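/*
 * __program_context - initialize and enable a context bank
 *
 * Resets the context, points TTBR0/TTBR1 at the domain's page table
 * (optionally split according to ttbr_split), enables TEX remap and
 * context fault interrupts, and either reuses the ASID of another
 * context that shares this page table or allocates an unused ASID.
 */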
static void __program_context(void __iomem *base, int ctx, int ncb,
			      phys_addr_t pgtable, int redirect,
			      int ttbr_split)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, ttbr_split);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));
	if (ttbr_split)
		SET_TTBR1_PA(base, ctx, (pgtable >> TTBR1_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx,
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();
}

static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
	priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
							  get_order(SZ_16K));

	if (!priv->pgtable)
		goto fail_nomem;

#ifdef CONFIG_IOMMU_PGTABLES_L2
	priv->redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
#endif

	memset(priv->pgtable, 0, SZ_16K);
	domain->priv = priv;
	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long *fl_table;
	int i;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_dev->num, iommu_drvdata->ncb,
			  __pa(priv->pgtable), priv->redirect,
			  iommu_drvdata->ttbr_split);

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);

	ctx_drvdata->attached_domain = domain;
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	SET_TLBIASID(iommu_drvdata->base, ctx_dev->num,
		     GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_dev->num));

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;
fail:
	mutex_unlock(&msm_iommu_lock);
}

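/*
 * Translate IOMMU_READ/WRITE/CACHE prot flags into ARM short-descriptor
 * attribute bits: section/supersection (FL_*) bits for 1M/16M mappings,
 * small/large page (SL_*) bits otherwise. Returns 0 if no valid TEX
 * class can be found for the requested attributes.
 */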
static int __get_pgprot(int prot, int len)
{
	unsigned int pgprot;
	int tex;

	if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
		prot |= IOMMU_READ | IOMMU_WRITE;
		WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
	}

	if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
		prot |= IOMMU_READ;
		WARN_ONCE(1, "Write-only iommu mappings unsupported; falling back to RW\n");
	}

	if (prot & IOMMU_CACHE)
		tex = (pgprot_kernel >> 2) & 0x07;
	else
		tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1)
		return 0;

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = FL_SHARED;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
		pgprot |= FL_AP0 | FL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
	} else {
		pgprot = SL_SHARED;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
		pgprot |= SL_AP0 | SL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
	}

	return pgprot;
}

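/*
 * Map a single physically contiguous region of 2^order 4K pages at va.
 * Only 4K, 64K, 1M and 16M mappings are supported; 4K and 64K entries
 * go into a second-level table that is allocated on demand.
 */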
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, int order, int prot)
{
	struct msm_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	size_t len = 0x1000UL << order;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	pgprot = __get_pgprot(prot, len);

	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		int i = 0;

		for (i = 0; i < 16; i++)
			if (*(fl_pte+i)) {
				ret = -EBUSY;
				goto fail;
			}

		for (i = 0; i < 16; i++)
			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
				  | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
		if (!priv->redirect)
			clean_pte(fl_pte, fl_pte + 16);
	}

	if (len == SZ_1M) {
		if (*fl_pte) {
			ret = -EBUSY;
			goto fail;
		}

		*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
					    | pgprot;
		if (!priv->redirect)
			clean_pte(fl_pte, fl_pte + 1);
	}

	/* Need a 2nd level table */
	if (len == SZ_4K || len == SZ_64K) {

		if (*fl_pte == 0) {
			unsigned long *sl;
			sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
							get_order(SZ_4K));

			if (!sl) {
				pr_debug("Could not allocate second level table\n");
				ret = -ENOMEM;
				goto fail;
			}
			memset(sl, 0, SZ_4K);

			*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) |
						      FL_TYPE_TABLE);

			if (!priv->redirect)
				clean_pte(fl_pte, fl_pte + 1);
		}

		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K) {
		if (*sl_pte) {
			ret = -EBUSY;
			goto fail;
		}

		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
					  | SL_TYPE_SMALL | pgprot;
		if (!priv->redirect)
			clean_pte(sl_pte, sl_pte + 1);
	}

	if (len == SZ_64K) {
		int i;

		for (i = 0; i < 16; i++)
			if (*(sl_pte+i)) {
				ret = -EBUSY;
				goto fail;
			}

		for (i = 0; i < 16; i++)
			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
					  | SL_SHARED | SL_TYPE_LARGE | pgprot;

		if (!priv->redirect)
			clean_pte(sl_pte, sl_pte + 16);
	}

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			   int order)
{
	struct msm_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	size_t len = 0x1000UL << order;
	int i, ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv) {
		ret = -ENODEV;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		ret = -ENODEV;
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M) {
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

		if (!priv->redirect)
			clean_pte(fl_pte, fl_pte + 16);
	}

	if (len == SZ_1M) {
		*fl_pte = 0;

		if (!priv->redirect)
			clean_pte(fl_pte, fl_pte + 1);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;

		if (!priv->redirect)
			clean_pte(sl_pte, sl_pte + 16);
	}

	if (len == SZ_4K) {
		*sl_pte = 0;

		if (!priv->redirect)
			clean_pte(sl_pte, sl_pte + 1);
	}

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			if (!priv->redirect)
				clean_pte(fl_pte, fl_pte + 1);
		}
	}

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static unsigned int get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	unsigned int pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

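/*
 * Map a scatterlist into the domain starting at va, using 4K second-level
 * entries only. len must be a multiple of SZ_4K; second-level tables are
 * allocated on demand as the range crosses 1M section boundaries.
 */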
static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	unsigned int pa;
	unsigned int offset = 0;
	unsigned int pgprot;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_offset, sl_start;
	unsigned int chunk_offset = 0;
	unsigned int chunk_pa;
	int ret = 0;
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	pgprot = __get_pgprot(prot, SZ_4K);

	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);

	chunk_pa = get_phys_addr(sg);
	if (chunk_pa == 0) {
		pr_debug("No dma address for sg %p\n", sg);
		ret = -EINVAL;
		goto fail;
	}

	while (offset < len) {
		/* Set up a 2nd level page table if one doesn't exist */
		if (*fl_pte == 0) {
			sl_table = (unsigned long *)
				 __get_free_pages(GFP_KERNEL, get_order(SZ_4K));

			if (!sl_table) {
				pr_debug("Could not allocate second level table\n");
				ret = -ENOMEM;
				goto fail;
			}

			memset(sl_table, 0, SZ_4K);
			*fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
							    FL_TYPE_TABLE);
			if (!priv->redirect)
				clean_pte(fl_pte, fl_pte + 1);
		} else
			sl_table = (unsigned long *)
					       __va(((*fl_pte) & FL_BASE_MASK));

		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {
			pa = chunk_pa + chunk_offset;
			sl_table[sl_offset] = (pa & SL_BASE_MASK_SMALL) |
			      pgprot | SL_NG | SL_SHARED | SL_TYPE_SMALL;
			sl_offset++;
			offset += SZ_4K;

			chunk_offset += SZ_4K;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				chunk_pa = get_phys_addr(sg);
				if (chunk_pa == 0) {
					pr_debug("No dma address for sg %p\n",
						 sg);
					ret = -EINVAL;
					goto fail;
				}
			}
		}

		if (!priv->redirect)
			clean_pte(sl_table + sl_start, sl_table + sl_offset);

		fl_pte++;
		sl_offset = 0;
	}
	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}


static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_start = SL_OFFSET(va);

	while (offset < len) {
		sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
		sl_end = ((len - offset) / SZ_4K) + sl_start;

		if (sl_end > NUM_SL_PTE)
			sl_end = NUM_SL_PTE;

		memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
		if (!priv->redirect)
			clean_pte(sl_table + sl_start, sl_table + sl_end);

		offset += (sl_end - sl_start) * SZ_4K;

		/* Unmap and free the 2nd level table if all mappings in it
		 * were removed. This saves memory, but the table will need
		 * to be re-allocated the next time someone tries to map these
		 * VAs.
		 */
		used = 0;

		/* If we just unmapped the whole table, don't bother
		 * seeing if there are still used entries left.
		 */
		if (sl_end - sl_start != NUM_SL_PTE)
			for (i = 0; i < NUM_SL_PTE; i++)
				if (sl_table[i]) {
					used = 1;
					break;
				}
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			if (!priv->redirect)
				clean_pte(fl_pte, fl_pte + 1);
		}

		sl_start = 0;
		fl_pte++;
	}

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}

static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	mb();
	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(base, ctx))
		ret = 0;

	__disable_clocks(iommu_drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

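/*
 * Context fault interrupt handler: reads the context's fault status,
 * reports the fault via report_iommu_fault() when a domain is attached
 * (dumping the context registers if nobody handles it), then clears the
 * fault and resumes the stalled transaction.
 */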
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_id;
	struct msm_iommu_drvdata *drvdata;
	void __iomem *base;
	unsigned int fsr, num;
	int ret;

	mutex_lock(&msm_iommu_lock);
	BUG_ON(!ctx_drvdata);

	drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
	BUG_ON(!drvdata);

	base = drvdata->base;
	num = ctx_drvdata->num;

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	fsr = GET_FSR(base, num);

	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
						 &ctx_drvdata->pdev->dev,
						 GET_FAR(base, num), 0);

		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name, num);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, num);
		}

		SET_FSR(base, num, fsr);
		SET_RESUME(base, num, 1);

		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	return __pa(priv->pgtable);
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr
};

static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
	int i = 0;
	unsigned int prrr = 0;
	unsigned int nmrr = 0;
	int c_icp, c_ocp, c_mt, c_nos;

	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);

	for (i = 0; i < NUM_TEX_CLASS; i++) {
		c_nos = PRRR_NOS(prrr, i);
		c_mt = PRRR_MT(prrr, i);
		c_icp = NMRR_ICP(nmrr, i);
		c_ocp = NMRR_OCP(nmrr, i);

		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
			return i;
	}

	return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

static int __init msm_iommu_init(void)
{
	if (!msm_soc_version_supports_iommu())
		return -ENODEV;

	setup_iommu_tex_classes();
	register_iommu(&msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");