blob: 2f74f8f51204d53e30d7d2169f026612482791d0 [file] [log] [blame]
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -08001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070011 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/interrupt.h>
20#include <linux/list.h>
21#include <linux/spinlock.h>
22#include <linux/slab.h>
23#include <linux/iommu.h>
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080024#include <linux/clk.h>
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070025
26#include <asm/cacheflush.h>
27#include <asm/sizes.h>
28
29#include <mach/iommu_hw-8xxx.h>
30#include <mach/iommu.h>
31
/*
 * Read a CP15 coprocessor register into 'reg' via the ARM MRC
 * instruction.  Arguments select the coprocessor, opcodes and
 * register numbers; the result is placed in the C lvalue 'reg'.
 */
#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   " #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

/* Read the TEX remap registers: Primary Region Remap and Normal Memory Remap */
#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
39
#ifndef CONFIG_IOMMU_PGTABLES_L2
/*
 * The IOMMU's hardware table walker does not go through the L2 cache
 * in this configuration, so CPU writes to page table entries must be
 * flushed out of the data cache before the walker can observe them.
 */
static inline void clean_pte(unsigned long *start, unsigned long *end)
{
	dmac_flush_range(start, end);
}
#else
/* Page tables are walked via L2 (see __program_context): no flush needed. */
static inline void clean_pte(unsigned long *start, unsigned long *end) { }
#endif
48
/* TEX class index per MSM_IOMMU_ATTR_* cache policy; set up at init time. */
static int msm_iommu_tex_class[4];

/* Serializes all driver state: page tables, attach lists, register access. */
DEFINE_SPINLOCK(msm_iommu_lock);

/* Per-domain private data. */
struct msm_priv {
	unsigned long *pgtable;		/* first-level page table (16K) */
	struct list_head list_attached;	/* contexts attached to this domain */
};
57
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080058static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
59{
60 int ret;
61
62 ret = clk_enable(drvdata->pclk);
63 if (ret)
64 goto fail;
65
66 if (drvdata->clk) {
67 ret = clk_enable(drvdata->clk);
68 if (ret)
69 clk_disable(drvdata->pclk);
70 }
71fail:
72 return ret;
73}
74
/* Undo __enable_clocks(): core clock first (if present), then pclk. */
static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->clk)
		clk_disable(drvdata->clk);
	clk_disable(drvdata->pclk);
}
81
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -070082static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
83{
84 struct msm_priv *priv = domain->priv;
85 struct msm_iommu_drvdata *iommu_drvdata;
86 struct msm_iommu_ctx_drvdata *ctx_drvdata;
87 int ret = 0;
88 int asid;
89
90 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
91 if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
92 BUG();
93
94 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
95 if (!iommu_drvdata)
96 BUG();
97
98 ret = __enable_clocks(iommu_drvdata);
99 if (ret)
100 goto fail;
101
102 asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
103 ctx_drvdata->num);
104
105 SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
106 asid | (va & TLBIVA_VA));
107 mb();
108 __disable_clocks(iommu_drvdata);
109 }
110fail:
111 return ret;
112}
113
/*
 * Return context bank 'ctx' to a known-clean state by zeroing all of
 * its configuration and fault registers (this also disables the MMU
 * for the context via SCTLR = 0).  The trailing mb() ensures the
 * register writes are posted before the caller proceeds.
 */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();
}
137
/*
 * Program context bank 'ctx' to translate through the page table at
 * physical address 'pgtable', then enable translation.  'ncb' is the
 * number of context banks on this IOMMU, used when searching for an
 * ASID to share or allocate.
 *
 * Must be called with the IOMMU clocks enabled (see __enable_clocks)
 * and msm_iommu_lock held.
 */
static void __program_context(void __iomem *base, int ctx, int ncb,
			      phys_addr_t pgtable)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes: mirror the CPU's PRRR/NMRR so memory
	 * attributes match between CPU and IOMMU mappings. */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_TTBR0_SH(base, ctx, 1);
	SET_TTBR1_SH(base, ctx, 1);

	SET_TTBR0_NOS(base, ctx, 1);
	SET_TTBR1_NOS(base, ctx, 1);

	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR0_IRGNL(base, ctx, 1);

	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR1_IRGNL(base, ctx, 1);

	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx, \
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		/* Try each candidate ASID value i; take the first one no
		 * other context bank is currently using. */
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		/* With ncb contexts and ncb candidate ASIDs, one must be free */
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();
}
234
235static int msm_iommu_domain_init(struct iommu_domain *domain)
236{
237 struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
238
239 if (!priv)
240 goto fail_nomem;
241
242 INIT_LIST_HEAD(&priv->list_attached);
243 priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
244 get_order(SZ_16K));
245
246 if (!priv->pgtable)
247 goto fail_nomem;
248
249 memset(priv->pgtable, 0, SZ_16K);
250 domain->priv = priv;
251 return 0;
252
253fail_nomem:
254 kfree(priv);
255 return -ENOMEM;
256}
257
/*
 * Tear down a domain: free every second-level table referenced by a
 * first-level "coarse table" descriptor, then the first-level table
 * and the private data itself.
 *
 * NOTE(review): does not detach contexts still on list_attached —
 * presumably callers detach all devices first; confirm.
 */
static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	int i;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		/* Low 2 bits of a first-level descriptor encode its type */
		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
284
/*
 * Attach an IOMMU context device to 'domain': program its context bank
 * with the domain's page table and add it to the domain's attach list.
 *
 * Returns 0 on success, -EINVAL for missing driver/platform data,
 * -EBUSY if the context is already attached (to any domain), or a
 * clock-enable errno.
 */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* The context device's parent is the IOMMU device proper */
	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* Non-empty list node means this context is attached somewhere */
	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	/* Guard against a double attach to this same domain */
	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_dev->num, iommu_drvdata->ncb,
			  __pa(priv->pgtable));

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
338
/*
 * Detach a context device from 'domain': invalidate its ASID's TLB
 * entries, reset the context bank and remove it from the attach list.
 * Errors (bad arguments, clock failure) cause a silent early exit
 * since the iommu_ops detach callback returns void.
 */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Invalidate all TLB entries tagged with this context's ASID */
	SET_TLBIASID(iommu_drvdata->base, ctx_dev->num,
		     GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_dev->num));

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
376
377static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
378 phys_addr_t pa, int order, int prot)
379{
380 struct msm_priv *priv;
381 unsigned long flags;
382 unsigned long *fl_table;
383 unsigned long *fl_pte;
384 unsigned long fl_offset;
385 unsigned long *sl_table;
386 unsigned long *sl_pte;
387 unsigned long sl_offset;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800388 unsigned int pgprot;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700389 size_t len = 0x1000UL << order;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800390 int ret = 0, tex, sh;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700391
392 spin_lock_irqsave(&msm_iommu_lock, flags);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700393
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800394 sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
395 tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];
396
397 if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
398 ret = -EINVAL;
399 goto fail;
400 }
401
402 priv = domain->priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700403 if (!priv) {
404 ret = -EINVAL;
405 goto fail;
406 }
407
408 fl_table = priv->pgtable;
409
410 if (len != SZ_16M && len != SZ_1M &&
411 len != SZ_64K && len != SZ_4K) {
412 pr_debug("Bad size: %d\n", len);
413 ret = -EINVAL;
414 goto fail;
415 }
416
417 if (!fl_table) {
418 pr_debug("Null page table\n");
419 ret = -EINVAL;
420 goto fail;
421 }
422
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800423 if (len == SZ_16M || len == SZ_1M) {
424 pgprot = sh ? FL_SHARED : 0;
425 pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
426 pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
427 pgprot |= tex & 0x04 ? FL_TEX0 : 0;
428 } else {
429 pgprot = sh ? SL_SHARED : 0;
430 pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
431 pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
432 pgprot |= tex & 0x04 ? SL_TEX0 : 0;
433 }
434
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700435 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
436 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
437
438 if (len == SZ_16M) {
439 int i = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700440
441 for (i = 0; i < 16; i++)
442 if (*(fl_pte+i)) {
443 ret = -EBUSY;
444 goto fail;
445 }
446
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700447 for (i = 0; i < 16; i++)
448 *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
449 FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800450 FL_SHARED | FL_NG | pgprot;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700451
452 clean_pte(fl_pte, fl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700453 }
454
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700455 if (len == SZ_1M) {
456 if (*fl_pte) {
457 ret = -EBUSY;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700458 goto fail;
459 }
460
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700461 *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
462 FL_TYPE_SECT | FL_SHARED | pgprot;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700463
464 clean_pte(fl_pte, fl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700465 }
466
467 /* Need a 2nd level table */
468 if (len == SZ_4K || len == SZ_64K) {
469
470 if (*fl_pte == 0) {
471 unsigned long *sl;
472 sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
473 get_order(SZ_4K));
474
475 if (!sl) {
476 pr_debug("Could not allocate second level table\n");
477 ret = -ENOMEM;
478 goto fail;
479 }
480 memset(sl, 0, SZ_4K);
481
482 *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
483 FL_TYPE_TABLE);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700484
485 clean_pte(fl_pte, fl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700486 }
487
488 if (!(*fl_pte & FL_TYPE_TABLE)) {
489 ret = -EBUSY;
490 goto fail;
491 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700492 }
493
494 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
495 sl_offset = SL_OFFSET(va);
496 sl_pte = sl_table + sl_offset;
497
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700498 if (len == SZ_4K) {
499 if (*sl_pte) {
500 ret = -EBUSY;
501 goto fail;
502 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700503
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800504 *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800505 SL_SHARED | SL_TYPE_SMALL | pgprot;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700506 clean_pte(sl_pte, sl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700507 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700508
509 if (len == SZ_64K) {
510 int i;
511
512 for (i = 0; i < 16; i++)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700513 if (*(sl_pte+i)) {
514 ret = -EBUSY;
515 goto fail;
516 }
517
518 for (i = 0; i < 16; i++)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700519 *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800520 SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700521
522 clean_pte(sl_pte, sl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700523 }
524
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -0700525 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700526fail:
527 spin_unlock_irqrestore(&msm_iommu_lock, flags);
528 return ret;
529}
530
531static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
532 int order)
533{
534 struct msm_priv *priv;
535 unsigned long flags;
536 unsigned long *fl_table;
537 unsigned long *fl_pte;
538 unsigned long fl_offset;
539 unsigned long *sl_table;
540 unsigned long *sl_pte;
541 unsigned long sl_offset;
542 size_t len = 0x1000UL << order;
543 int i, ret = 0;
544
545 spin_lock_irqsave(&msm_iommu_lock, flags);
546
547 priv = domain->priv;
548
549 if (!priv) {
550 ret = -ENODEV;
551 goto fail;
552 }
553
554 fl_table = priv->pgtable;
555
556 if (len != SZ_16M && len != SZ_1M &&
557 len != SZ_64K && len != SZ_4K) {
558 pr_debug("Bad length: %d\n", len);
559 ret = -EINVAL;
560 goto fail;
561 }
562
563 if (!fl_table) {
564 pr_debug("Null page table\n");
565 ret = -EINVAL;
566 goto fail;
567 }
568
569 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
570 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
571
572 if (*fl_pte == 0) {
573 pr_debug("First level PTE is 0\n");
574 ret = -ENODEV;
575 goto fail;
576 }
577
578 /* Unmap supersection */
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700579 if (len == SZ_16M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700580 for (i = 0; i < 16; i++)
581 *(fl_pte+i) = 0;
582
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700583 clean_pte(fl_pte, fl_pte + 16);
584 }
585
586 if (len == SZ_1M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700587 *fl_pte = 0;
588
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700589 clean_pte(fl_pte, fl_pte + 1);
590 }
591
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700592 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
593 sl_offset = SL_OFFSET(va);
594 sl_pte = sl_table + sl_offset;
595
596 if (len == SZ_64K) {
597 for (i = 0; i < 16; i++)
598 *(sl_pte+i) = 0;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700599
600 clean_pte(sl_pte, sl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700601 }
602
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700603 if (len == SZ_4K) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700604 *sl_pte = 0;
605
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700606 clean_pte(sl_pte, sl_pte + 1);
607 }
608
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700609 if (len == SZ_4K || len == SZ_64K) {
610 int used = 0;
611
612 for (i = 0; i < NUM_SL_PTE; i++)
613 if (sl_table[i])
614 used = 1;
615 if (!used) {
616 free_page((unsigned long)sl_table);
617 *fl_pte = 0;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700618
619 clean_pte(fl_pte, fl_pte + 1);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700620 }
621 }
622
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -0700623 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700624fail:
625 spin_unlock_irqrestore(&msm_iommu_lock, flags);
626 return ret;
627}
628
/*
 * Translate IOVA 'va' to a physical address by asking the hardware to
 * perform a V2P lookup through the first context attached to 'domain'.
 * Returns 0 if no context is attached or the lookup faulted.
 *
 * NOTE(review): a clock-enable failure stores a negative errno into
 * the phys_addr_t return — callers would see a bogus nonzero address;
 * confirm this is acceptable.
 */
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	unsigned long flags;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	/* Any attached context shares the same page table; use the first */
	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Kick off the hardware translation, then read the result */
	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	mb();
	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(base, ctx))
		ret = 0;

	__disable_clocks(iommu_drvdata);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
677
/* No optional IOMMU capabilities are advertised by this driver. */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}
683
/*
 * Dump the fault-related registers of context bank 'ctx' to the log,
 * decoding the FSR fault-status bits into short mnemonics.  Intended
 * for use from the fault interrupt handler.
 */
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
710
/*
 * Context-fault interrupt handler.  Scans every context bank of the
 * faulting IOMMU, dumps the registers of any bank with a pending
 * fault, and clears its FSR to retire the fault.
 *
 * NOTE(review): always returns 0 (== IRQ_NONE) even when a fault was
 * found and cleared — confirm whether IRQ_HANDLED should be returned.
 */
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_drvdata *drvdata = dev_id;
	void __iomem *base;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!drvdata) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	base = drvdata->base;

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int) base);
	pr_err("name = %s\n", drvdata->name);

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	for (i = 0; i < drvdata->ncb; i++) {
		fsr = GET_FSR(base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, i);
			/* Write-to-clear the fault status bits */
			SET_FSR(base, i, 0x4000000F);
		}
	}
	__disable_clocks(drvdata);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}
749
/* IOMMU API callbacks registered with the core via register_iommu(). */
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap
};
760
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800761static int __init get_tex_class(int icp, int ocp, int mt, int nos)
762{
763 int i = 0;
764 unsigned int prrr = 0;
765 unsigned int nmrr = 0;
766 int c_icp, c_ocp, c_mt, c_nos;
767
768 RCP15_PRRR(prrr);
769 RCP15_NMRR(nmrr);
770
771 for (i = 0; i < NUM_TEX_CLASS; i++) {
772 c_nos = PRRR_NOS(prrr, i);
773 c_mt = PRRR_MT(prrr, i);
774 c_icp = NMRR_ICP(nmrr, i);
775 c_ocp = NMRR_OCP(nmrr, i);
776
777 if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
778 return i;
779 }
780
781 return -ENODEV;
782}
783
/*
 * Resolve each MSM_IOMMU_ATTR_* cache policy to the CPU's matching TEX
 * class.  A policy the CPU does not implement stores -ENODEV, which
 * msm_iommu_map() later rejects via its "tex < 0" range check.
 */
static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}
798
/* Driver init: resolve TEX classes, then register with the IOMMU core. */
static int __init msm_iommu_init(void)
{
	setup_iommu_tex_classes();
	register_iommu(&msm_iommu_ops);
	return 0;
}

/* subsys level: must run before drivers that attach devices to domains */
subsys_initcall(msm_iommu_init);
807
808MODULE_LICENSE("GPL v2");
809MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");