/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include <mach/iommu_hw-8xxx.h>
#include <mach/iommu.h>

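/* Read a CP15 coprocessor register into 'reg'. Used below to fetch the ARM
 * TEX-remap registers (PRRR and NMRR) so that the IOMMU contexts can be
 * programmed with the same memory-attribute remapping as the CPU.
 */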
#define MRC(reg, processor, op1, crn, crm, op2) \
__asm__ __volatile__ ( \
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

#define RCP15_PRRR(reg)	MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)	MRC(reg, p15, 0, c10, c2, 1)

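/* When the page tables are not configured as L2-cacheable
 * (CONFIG_IOMMU_PGTABLES_L2 unset), the IOMMU's hardware table walker reads
 * them directly from memory, so every PTE update has to be cleaned out of
 * the CPU data cache before it becomes visible to the IOMMU.
 */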
#ifndef CONFIG_IOMMU_PGTABLES_L2
static inline void clean_pte(unsigned long *start, unsigned long *end)
{
	dmac_flush_range(start, end);
}
#else
static inline void clean_pte(unsigned long *start, unsigned long *end) { }
#endif

static int msm_iommu_tex_class[4];

DEFINE_SPINLOCK(msm_iommu_lock);

struct msm_priv {
	unsigned long *pgtable;
	struct list_head list_attached;
};

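/* The IOMMU register file is only accessible while its clocks are running:
 * pclk (the interface clock) is mandatory, while the core clock, clk, is
 * optional and may be NULL on some targets.
 */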
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_enable(drvdata->pclk);
	if (ret)
		goto fail;

	if (drvdata->clk) {
		ret = clk_enable(drvdata->clk);
		if (ret)
			clk_disable(drvdata->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->clk)
		clk_disable(drvdata->clk);
	clk_disable(drvdata->pclk);
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
		mb();
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

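/* Invalidate a single virtual address, tagged with the context's current
 * ASID, in the TLB of every context attached to this domain.
 */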
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   asid | (va & TLBIVA_VA));
		mb();
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();
}

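/* Fully initialize a context bank: point it at the domain's page table,
 * program its memory attributes, pick an ASID (re-using the ASID of any
 * other context that already uses the same page table), and enable it.
 */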
static void __program_context(void __iomem *base, int ctx, int ncb,
			      phys_addr_t pgtable)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_TTBR0_SH(base, ctx, 1);
	SET_TTBR1_SH(base, ctx, 1);

	SET_TTBR0_NOS(base, ctx, 1);
	SET_TTBR1_NOS(base, ctx, 1);

	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR0_IRGNL(base, ctx, 1);

	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR1_IRGNL(base, ctx, 1);

	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx, \
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();
}

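/* Each domain owns one 16 KB first-level page table (ARM short-descriptor
 * format); second-level tables are allocated on demand in msm_iommu_map().
 */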
static int msm_iommu_domain_init(struct iommu_domain *domain)
{
	struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
	priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
							  get_order(SZ_16K));

	if (!priv->pgtable)
		goto fail_nomem;

	memset(priv->pgtable, 0, SZ_16K);
	domain->priv = priv;
	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	int i;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_dev->num, iommu_drvdata->ncb,
			  __pa(priv->pgtable));

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ret = __flush_iotlb(domain);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __flush_iotlb(domain);
	if (ret)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, int order, int prot)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	size_t len = 0x1000UL << order;
	int ret = 0, tex, sh;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
	tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
		ret = -EINVAL;
		goto fail;
	}

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %d\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = sh ? FL_SHARED : 0;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
	} else {
		pgprot = sh ? SL_SHARED : 0;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		int i = 0;

		for (i = 0; i < 16; i++)
			if (*(fl_pte+i)) {
				ret = -EBUSY;
				goto fail;
			}

		for (i = 0; i < 16; i++)
			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
				      FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
				      FL_SHARED | FL_NG | pgprot;

		clean_pte(fl_pte, fl_pte + 16);
	}

	if (len == SZ_1M) {
		if (*fl_pte) {
			ret = -EBUSY;
			goto fail;
		}

		*fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
			  FL_TYPE_SECT | FL_SHARED | pgprot;

		clean_pte(fl_pte, fl_pte + 1);
	}

	/* Need a 2nd level table */
	if (len == SZ_4K || len == SZ_64K) {

		if (*fl_pte == 0) {
			unsigned long *sl;
			sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
							get_order(SZ_4K));

			if (!sl) {
				pr_debug("Could not allocate second level table\n");
				ret = -ENOMEM;
				goto fail;
			}
			memset(sl, 0, SZ_4K);

			*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
						      FL_TYPE_TABLE);

			clean_pte(fl_pte, fl_pte + 1);
		}

		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K) {
		if (*sl_pte) {
			ret = -EBUSY;
			goto fail;
		}

		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
			  SL_SHARED | SL_TYPE_SMALL | pgprot;
		clean_pte(sl_pte, sl_pte + 1);
	}

	if (len == SZ_64K) {
		int i;

		for (i = 0; i < 16; i++)
			if (*(sl_pte+i)) {
				ret = -EBUSY;
				goto fail;
			}

		for (i = 0; i < 16; i++)
			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
			    SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;

		clean_pte(sl_pte, sl_pte + 16);
	}

	ret = __flush_iotlb_va(domain, va);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			    int order)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	size_t len = 0x1000UL << order;
	int i, ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv) {
		ret = -ENODEV;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %d\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		ret = -ENODEV;
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M) {
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

		clean_pte(fl_pte, fl_pte + 16);
	}

	if (len == SZ_1M) {
		*fl_pte = 0;

		clean_pte(fl_pte, fl_pte + 1);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;

		clean_pte(sl_pte, sl_pte + 16);
	}

	if (len == SZ_4K) {
		*sl_pte = 0;

		clean_pte(sl_pte, sl_pte + 1);
	}

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			clean_pte(fl_pte, fl_pte + 1);
		}
	}

	ret = __flush_iotlb_va(domain, va);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	unsigned long flags;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	mb();
	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(base, ctx))
		ret = 0;

	__disable_clocks(iommu_drvdata);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

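/* Context-fault ISR: scan every context bank on the faulting IOMMU, dump
 * the registers of any bank with a non-zero fault status, then clear it.
 */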
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_drvdata *drvdata = dev_id;
	void __iomem *base;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!drvdata) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	base = drvdata->base;

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int) base);
	pr_err("name = %s\n", drvdata->name);

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	for (i = 0; i < drvdata->ncb; i++) {
		fsr = GET_FSR(base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, i);
			SET_FSR(base, i, 0x4000000F);
		}
	}
	__disable_clocks(drvdata);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap
};

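/* Search the CPU's PRRR/NMRR TEX-remap settings for a class whose memory
 * type, shareability and inner/outer cache policies match the request.
 */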
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
	int i = 0;
	unsigned int prrr = 0;
	unsigned int nmrr = 0;
	int c_icp, c_ocp, c_mt, c_nos;

	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);

	for (i = 0; i < NUM_TEX_CLASS; i++) {
		c_nos = PRRR_NOS(prrr, i);
		c_mt = PRRR_MT(prrr, i);
		c_icp = NMRR_ICP(nmrr, i);
		c_ocp = NMRR_OCP(nmrr, i);

		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
			return i;
	}

	return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

static int __init msm_iommu_init(void)
{
	setup_iommu_tex_classes();
	register_iommu(&msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");