blob: e8746632b8cf5d94e1aaa45b8de6a7b8092e4982 [file] [log] [blame]
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -08001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070011 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/interrupt.h>
20#include <linux/list.h>
21#include <linux/spinlock.h>
22#include <linux/slab.h>
23#include <linux/iommu.h>
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080024#include <linux/clk.h>
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -070025#include <linux/scatterlist.h>
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070026
27#include <asm/cacheflush.h>
28#include <asm/sizes.h>
29
30#include <mach/iommu_hw-8xxx.h>
31#include <mach/iommu.h>
32
/* Read a CP15 coprocessor register into 'reg' via an inline MRC. */
#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n"  \
: "=r" (reg))

/* Read the TEX-remap registers: Primary Region Remap Register and
 * Normal Memory Remap Register (ARM VMSA).
 */
#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
40
/*
 * Flush a range of page table entries from the CPU data cache so that
 * the IOMMU's hardware page table walker sees the updated entries.
 * Only needed when the page tables are not configured as L2-cacheable
 * (see msm_priv.redirect).
 */
static inline void clean_pte(unsigned long *start, unsigned long *end)
{
	dmac_flush_range(start, end);
}
Stepan Moskovchenko094475d2011-08-03 13:38:29 -070045
/* Maps MSM_IOMMU_ATTR_* cache-policy indices to discovered TEX classes;
 * populated at init by setup_iommu_tex_classes(). */
static int msm_iommu_tex_class[4];

/* Serializes all driver state: domain page tables, attachment lists and
 * context register programming. */
DEFINE_SPINLOCK(msm_iommu_lock);

/* Per-domain private data. */
struct msm_priv {
	unsigned long *pgtable;		/* 16 KB first-level page table */
	int redirect;			/* nonzero: page tables L2-cacheable */
	struct list_head list_attached;	/* attached ctx_drvdata list */
};
55
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080056static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
57{
58 int ret;
59
60 ret = clk_enable(drvdata->pclk);
61 if (ret)
62 goto fail;
63
64 if (drvdata->clk) {
65 ret = clk_enable(drvdata->clk);
66 if (ret)
67 clk_disable(drvdata->pclk);
68 }
69fail:
70 return ret;
71}
72
/*
 * Disable the clocks enabled by __enable_clocks(), in reverse order:
 * optional core clock first, then pclk.
 */
static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->clk)
		clk_disable(drvdata->clk);
	clk_disable(drvdata->pclk);
}
79
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -070080static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
81{
82 struct msm_priv *priv = domain->priv;
83 struct msm_iommu_drvdata *iommu_drvdata;
84 struct msm_iommu_ctx_drvdata *ctx_drvdata;
85 int ret = 0;
86 int asid;
87
88 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
89 if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
90 BUG();
91
92 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
93 if (!iommu_drvdata)
94 BUG();
95
96 ret = __enable_clocks(iommu_drvdata);
97 if (ret)
98 goto fail;
99
100 asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
101 ctx_drvdata->num);
102
103 SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
104 asid | (va & TLBIVA_VA));
105 mb();
106 __disable_clocks(iommu_drvdata);
107 }
108fail:
109 return ret;
110}
111
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700112static int __flush_iotlb(struct iommu_domain *domain)
113{
114 struct msm_priv *priv = domain->priv;
115 struct msm_iommu_drvdata *iommu_drvdata;
116 struct msm_iommu_ctx_drvdata *ctx_drvdata;
117 int ret = 0;
118 int asid;
119
120 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
121 if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
122 BUG();
123
124 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
125 if (!iommu_drvdata)
126 BUG();
127
128 ret = __enable_clocks(iommu_drvdata);
129 if (ret)
130 goto fail;
131
132 asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
133 ctx_drvdata->num);
134
135 SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
136 mb();
137 __disable_clocks(iommu_drvdata);
138 }
139fail:
140 return ret;
141}
142
/*
 * Reset a context bank to a clean, disabled state by zeroing all of its
 * configuration registers.  Clocks must already be enabled by the caller.
 */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	/* Ensure all register writes complete before returning */
	mb();
}
166
/*
 * Program a context bank for use by a domain: configure hardware table
 * walking, point TTBR0 at the domain's page table, set TEX remap and
 * fault behavior, pick an ASID, and finally enable the MMU.
 *
 * @base:     IOMMU register base
 * @ctx:      context bank number to program
 * @ncb:      total number of context banks on this IOMMU (for ASID scan)
 * @pgtable:  physical address of the first-level page table
 * @redirect: nonzero to make page table walks L2-cacheable/shareable
 *
 * Caller must hold msm_iommu_lock and have clocks enabled.
 */
static void __program_context(void __iomem *base, int ctx, int ncb,
			      phys_addr_t pgtable, int redirect)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes to mirror the CPU's current PRRR/NMRR */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx, \
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		/* Try each candidate ASID value i; it is free if no other
		 * context bank currently holds it. */
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		/* With ncb candidates and at most ncb-1 other contexts,
		 * a free ASID must exist; found set here means a bug. */
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();
}
260
Stepan Moskovchenkoff2d3662011-08-31 17:13:32 -0700261static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700262{
263 struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
264
265 if (!priv)
266 goto fail_nomem;
267
268 INIT_LIST_HEAD(&priv->list_attached);
269 priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
270 get_order(SZ_16K));
271
272 if (!priv->pgtable)
273 goto fail_nomem;
274
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700275#ifdef CONFIG_IOMMU_PGTABLES_L2
276 priv->redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
277#endif
278
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700279 memset(priv->pgtable, 0, SZ_16K);
280 domain->priv = priv;
281 return 0;
282
283fail_nomem:
284 kfree(priv);
285 return -ENOMEM;
286}
287
/*
 * Tear down a domain: free every second-level page table still referenced
 * by the first-level table, then the first-level table itself, then the
 * private data.  Detach is assumed to have already happened.
 */
static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	int i;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		/* Free each second-level table pointed to by a first-level
		 * "table" descriptor (low two bits == FL_TYPE_TABLE). */
		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
314
/*
 * Attach an IOMMU context device to a domain: program the device's
 * context bank with the domain's page table and record the attachment.
 *
 * Returns 0 on success; -EINVAL on missing/invalid device data; -EBUSY
 * if the context is already attached; or a clock-enable error code.
 */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* dev is a context device; its parent is the IOMMU device */
	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* Context already attached to some domain? */
	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	/* Defensive: also reject if already present on this domain's list */
	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_dev->num, iommu_drvdata->ncb,
			  __pa(priv->pgtable), priv->redirect);

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
368
/*
 * Detach a context device from a domain: invalidate the context's ASID
 * in the TLB, reset the context bank, and remove it from the domain's
 * attachment list.  Errors (bad arguments, clock failure) are silently
 * ignored since the iommu_ops detach callback returns void.
 */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Flush this context's ASID before tearing the context down */
	SET_TLBIASID(iommu_drvdata->base, ctx_dev->num,
		     GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_dev->num));

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
406
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700407static int __get_pgprot(int prot, int len)
408{
409 unsigned int pgprot;
410 int tex, sh;
411
412 sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
413 tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];
414
415 if (tex < 0 || tex > NUM_TEX_CLASS - 1)
416 return 0;
417
418 if (len == SZ_16M || len == SZ_1M) {
419 pgprot = sh ? FL_SHARED : 0;
420 pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
421 pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
422 pgprot |= tex & 0x04 ? FL_TEX0 : 0;
423 } else {
424 pgprot = sh ? SL_SHARED : 0;
425 pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
426 pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
427 pgprot |= tex & 0x04 ? SL_TEX0 : 0;
428 }
429
430 return pgprot;
431}
432
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700433static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
434 phys_addr_t pa, int order, int prot)
435{
436 struct msm_priv *priv;
437 unsigned long flags;
438 unsigned long *fl_table;
439 unsigned long *fl_pte;
440 unsigned long fl_offset;
441 unsigned long *sl_table;
442 unsigned long *sl_pte;
443 unsigned long sl_offset;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800444 unsigned int pgprot;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700445 size_t len = 0x1000UL << order;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700446 int ret = 0;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700447
448 spin_lock_irqsave(&msm_iommu_lock, flags);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700449
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800450 priv = domain->priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700451 if (!priv) {
452 ret = -EINVAL;
453 goto fail;
454 }
455
456 fl_table = priv->pgtable;
457
458 if (len != SZ_16M && len != SZ_1M &&
459 len != SZ_64K && len != SZ_4K) {
460 pr_debug("Bad size: %d\n", len);
461 ret = -EINVAL;
462 goto fail;
463 }
464
465 if (!fl_table) {
466 pr_debug("Null page table\n");
467 ret = -EINVAL;
468 goto fail;
469 }
470
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700471 pgprot = __get_pgprot(prot, len);
472
473 if (!pgprot) {
474 ret = -EINVAL;
475 goto fail;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800476 }
477
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700478 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
479 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
480
481 if (len == SZ_16M) {
482 int i = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700483
484 for (i = 0; i < 16; i++)
485 if (*(fl_pte+i)) {
486 ret = -EBUSY;
487 goto fail;
488 }
489
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700490 for (i = 0; i < 16; i++)
491 *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
492 FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800493 FL_SHARED | FL_NG | pgprot;
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700494 if (!priv->redirect)
495 clean_pte(fl_pte, fl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700496 }
497
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700498 if (len == SZ_1M) {
499 if (*fl_pte) {
500 ret = -EBUSY;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700501 goto fail;
502 }
503
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700504 *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
505 FL_TYPE_SECT | FL_SHARED | pgprot;
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700506 if (!priv->redirect)
507 clean_pte(fl_pte, fl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700508 }
509
510 /* Need a 2nd level table */
511 if (len == SZ_4K || len == SZ_64K) {
512
513 if (*fl_pte == 0) {
514 unsigned long *sl;
515 sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
516 get_order(SZ_4K));
517
518 if (!sl) {
519 pr_debug("Could not allocate second level table\n");
520 ret = -ENOMEM;
521 goto fail;
522 }
523 memset(sl, 0, SZ_4K);
524
525 *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
526 FL_TYPE_TABLE);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700527
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700528 if (!priv->redirect)
529 clean_pte(fl_pte, fl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700530 }
531
532 if (!(*fl_pte & FL_TYPE_TABLE)) {
533 ret = -EBUSY;
534 goto fail;
535 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700536 }
537
538 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
539 sl_offset = SL_OFFSET(va);
540 sl_pte = sl_table + sl_offset;
541
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700542 if (len == SZ_4K) {
543 if (*sl_pte) {
544 ret = -EBUSY;
545 goto fail;
546 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700547
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800548 *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800549 SL_SHARED | SL_TYPE_SMALL | pgprot;
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700550 if (!priv->redirect)
551 clean_pte(sl_pte, sl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700552 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700553
554 if (len == SZ_64K) {
555 int i;
556
557 for (i = 0; i < 16; i++)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700558 if (*(sl_pte+i)) {
559 ret = -EBUSY;
560 goto fail;
561 }
562
563 for (i = 0; i < 16; i++)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700564 *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800565 SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700566
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700567 if (!priv->redirect)
568 clean_pte(sl_pte, sl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700569 }
570
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -0700571 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700572fail:
573 spin_unlock_irqrestore(&msm_iommu_lock, flags);
574 return ret;
575}
576
577static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
578 int order)
579{
580 struct msm_priv *priv;
581 unsigned long flags;
582 unsigned long *fl_table;
583 unsigned long *fl_pte;
584 unsigned long fl_offset;
585 unsigned long *sl_table;
586 unsigned long *sl_pte;
587 unsigned long sl_offset;
588 size_t len = 0x1000UL << order;
589 int i, ret = 0;
590
591 spin_lock_irqsave(&msm_iommu_lock, flags);
592
593 priv = domain->priv;
594
595 if (!priv) {
596 ret = -ENODEV;
597 goto fail;
598 }
599
600 fl_table = priv->pgtable;
601
602 if (len != SZ_16M && len != SZ_1M &&
603 len != SZ_64K && len != SZ_4K) {
604 pr_debug("Bad length: %d\n", len);
605 ret = -EINVAL;
606 goto fail;
607 }
608
609 if (!fl_table) {
610 pr_debug("Null page table\n");
611 ret = -EINVAL;
612 goto fail;
613 }
614
615 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
616 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
617
618 if (*fl_pte == 0) {
619 pr_debug("First level PTE is 0\n");
620 ret = -ENODEV;
621 goto fail;
622 }
623
624 /* Unmap supersection */
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700625 if (len == SZ_16M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700626 for (i = 0; i < 16; i++)
627 *(fl_pte+i) = 0;
628
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700629 if (!priv->redirect)
630 clean_pte(fl_pte, fl_pte + 16);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700631 }
632
633 if (len == SZ_1M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700634 *fl_pte = 0;
635
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700636 if (!priv->redirect)
637 clean_pte(fl_pte, fl_pte + 1);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700638 }
639
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700640 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
641 sl_offset = SL_OFFSET(va);
642 sl_pte = sl_table + sl_offset;
643
644 if (len == SZ_64K) {
645 for (i = 0; i < 16; i++)
646 *(sl_pte+i) = 0;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700647
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700648 if (!priv->redirect)
649 clean_pte(sl_pte, sl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700650 }
651
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700652 if (len == SZ_4K) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700653 *sl_pte = 0;
654
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700655 if (!priv->redirect)
656 clean_pte(sl_pte, sl_pte + 1);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700657 }
658
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700659 if (len == SZ_4K || len == SZ_64K) {
660 int used = 0;
661
662 for (i = 0; i < NUM_SL_PTE; i++)
663 if (sl_table[i])
664 used = 1;
665 if (!used) {
666 free_page((unsigned long)sl_table);
667 *fl_pte = 0;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700668
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700669 if (!priv->redirect)
670 clean_pte(fl_pte, fl_pte + 1);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700671 }
672 }
673
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -0700674 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700675fail:
676 spin_unlock_irqrestore(&msm_iommu_lock, flags);
677 return ret;
678}
679
/*
 * Map a scatter-gather list of 4 KB pages starting at 'va'.  'len' must
 * be a multiple of 4 KB.  Second-level tables are allocated on demand,
 * one first-level entry (1 MB of VA) at a time, and only the PTEs
 * actually written are cache-cleaned.
 *
 * Returns 0 on success, -EINVAL for a bad prot, -ENOMEM if a
 * second-level table cannot be allocated.
 *
 * NOTE(review): unlike msm_iommu_map(), existing PTEs are overwritten
 * without an -EBUSY check, and an FL entry holding a section descriptor
 * would be misinterpreted as a table pointer — presumably callers
 * guarantee the range is unmapped; verify against callers.
 */
static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	unsigned int pa;
	unsigned int offset = 0;
	unsigned int pgprot;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_offset, sl_start;
	unsigned long flags;
	unsigned int chunk_offset = 0;
	unsigned int chunk_pa;
	int ret = 0;
	struct msm_priv *priv;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	pgprot = __get_pgprot(prot, SZ_4K);

	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);

	/* Physical address of the current scatterlist chunk */
	chunk_pa = sg_phys(sg);

	while (offset < len) {
		/* Set up a 2nd level page table if one doesn't exist */
		if (*fl_pte == 0) {
			sl_table = (unsigned long *)
				 __get_free_pages(GFP_ATOMIC, get_order(SZ_4K));

			if (!sl_table) {
				pr_debug("Could not allocate second level table\n");
				ret = -ENOMEM;
				goto fail;
			}

			memset(sl_table, 0, SZ_4K);
			*fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
							    FL_TYPE_TABLE);
			if (!priv->redirect)
				clean_pte(fl_pte, fl_pte + 1);
		} else
			sl_table = (unsigned long *)
					       __va(((*fl_pte) & FL_BASE_MASK));

		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {
			pa = chunk_pa + chunk_offset;
			sl_table[sl_offset] = (pa & SL_BASE_MASK_SMALL) |
					      pgprot | SL_AP0 | SL_AP1 | SL_NG |
					      SL_SHARED | SL_TYPE_SMALL;
			sl_offset++;
			offset += SZ_4K;

			chunk_offset += SZ_4K;

			/* Advance to the next sg chunk when the current
			 * one is exhausted (and more remains to map). */
			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				chunk_pa = sg_phys(sg);
			}
		}

		/* Clean only the PTEs written in this iteration */
		if (!priv->redirect)
			clean_pte(sl_table + sl_start, sl_table + sl_offset);

		fl_pte++;
		sl_offset = 0;
	}
	__flush_iotlb(domain);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
775
776
/*
 * Unmap 'len' bytes (a multiple of 4 KB) of 4 KB mappings starting at
 * 'va', walking one first-level entry (1 MB of VA) at a time.  Each
 * second-level table that becomes empty is freed.  Always returns 0.
 *
 * NOTE(review): assumes every FL entry in the range holds a table
 * descriptor — a section mapping in the range would be misinterpreted;
 * presumably callers only pass ranges mapped via map_range.
 */
static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	unsigned long flags;
	int used, i;
	struct msm_priv *priv;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_start = SL_OFFSET(va);

	while (offset < len) {
		sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
		/* Clamp this iteration to the end of the current SL table */
		sl_end = ((len - offset) / SZ_4K) + sl_start;

		if (sl_end > NUM_SL_PTE)
			sl_end = NUM_SL_PTE;

		memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
		if (!priv->redirect)
			clean_pte(sl_table + sl_start, sl_table + sl_end);

		offset += (sl_end - sl_start) * SZ_4K;

		/* Unmap and free the 2nd level table if all mappings in it
		 * were removed. This saves memory, but the table will need
		 * to be re-allocated the next time someone tries to map these
		 * VAs.
		 */
		used = 0;

		/* If we just unmapped the whole table, don't bother
		 * seeing if there are still used entries left.
		 */
		if (sl_end - sl_start != NUM_SL_PTE)
			for (i = 0; i < NUM_SL_PTE; i++)
				if (sl_table[i]) {
					used = 1;
					break;
				}
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			if (!priv->redirect)
				clean_pte(fl_pte, fl_pte + 1);
		}

		sl_start = 0;
		fl_pte++;
	}

	__flush_iotlb(domain);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return 0;
}
847
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700848static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
849 unsigned long va)
850{
851 struct msm_priv *priv;
852 struct msm_iommu_drvdata *iommu_drvdata;
853 struct msm_iommu_ctx_drvdata *ctx_drvdata;
854 unsigned int par;
855 unsigned long flags;
856 void __iomem *base;
857 phys_addr_t ret = 0;
858 int ctx;
859
860 spin_lock_irqsave(&msm_iommu_lock, flags);
861
862 priv = domain->priv;
863 if (list_empty(&priv->list_attached))
864 goto fail;
865
866 ctx_drvdata = list_entry(priv->list_attached.next,
867 struct msm_iommu_ctx_drvdata, attached_elm);
868 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
869
870 base = iommu_drvdata->base;
871 ctx = ctx_drvdata->num;
872
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800873 ret = __enable_clocks(iommu_drvdata);
874 if (ret)
875 goto fail;
876
Stepan Moskovchenkob0e78082011-02-28 16:04:55 -0800877 SET_V2PPR(base, ctx, va & V2Pxx_VA);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700878
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700879 mb();
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700880 par = GET_PAR(base, ctx);
881
882 /* We are dealing with a supersection */
883 if (GET_NOFAULT_SS(base, ctx))
884 ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
885 else /* Upper 20 bits from PAR, lower 12 from VA */
886 ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
887
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800888 if (GET_FAULT(base, ctx))
889 ret = 0;
890
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800891 __disable_clocks(iommu_drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700892fail:
893 spin_unlock_irqrestore(&msm_iommu_lock, flags);
894 return ret;
895}
896
/* This IOMMU advertises no optional capabilities. */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}
902
/*
 * Dump the fault-related registers of a context bank, decoding the FSR
 * fault-status bits into mnemonics.  Called from the fault handler with
 * clocks enabled.
 */
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
929
930irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
931{
932 struct msm_iommu_drvdata *drvdata = dev_id;
933 void __iomem *base;
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800934 unsigned int fsr;
Stepan Moskovchenkoa43d8c12011-02-24 18:00:42 -0800935 int i, ret;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700936
937 spin_lock(&msm_iommu_lock);
938
939 if (!drvdata) {
940 pr_err("Invalid device ID in context interrupt handler\n");
941 goto fail;
942 }
943
944 base = drvdata->base;
945
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700946 pr_err("Unexpected IOMMU page fault!\n");
947 pr_err("base = %08x\n", (unsigned int) base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700948 pr_err("name = %s\n", drvdata->name);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700949
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800950 ret = __enable_clocks(drvdata);
951 if (ret)
952 goto fail;
953
Stepan Moskovchenkoa43d8c12011-02-24 18:00:42 -0800954 for (i = 0; i < drvdata->ncb; i++) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700955 fsr = GET_FSR(base, i);
956 if (fsr) {
957 pr_err("Fault occurred in context %d.\n", i);
958 pr_err("Interesting registers:\n");
959 print_ctx_regs(base, i);
960 SET_FSR(base, i, 0x4000000F);
961 }
962 }
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800963 __disable_clocks(drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700964fail:
965 spin_unlock(&msm_iommu_lock);
966 return 0;
967}
968
/* IOMMU API callbacks registered with the core via register_iommu(). */
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap
};
981
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800982static int __init get_tex_class(int icp, int ocp, int mt, int nos)
983{
984 int i = 0;
985 unsigned int prrr = 0;
986 unsigned int nmrr = 0;
987 int c_icp, c_ocp, c_mt, c_nos;
988
989 RCP15_PRRR(prrr);
990 RCP15_NMRR(nmrr);
991
992 for (i = 0; i < NUM_TEX_CLASS; i++) {
993 c_nos = PRRR_NOS(prrr, i);
994 c_mt = PRRR_MT(prrr, i);
995 c_icp = NMRR_ICP(nmrr, i);
996 c_ocp = NMRR_OCP(nmrr, i);
997
998 if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
999 return i;
1000 }
1001
1002 return -ENODEV;
1003}
1004
/*
 * Populate msm_iommu_tex_class[] by matching each MSM_IOMMU_ATTR_*
 * cache policy against the CPU's TEX-remap configuration.  A policy
 * with no matching class stores -ENODEV, which __get_pgprot() rejects.
 */
static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}
1019
/*
 * Driver entry point: bail out on SoC revisions without a usable IOMMU,
 * discover TEX classes, and register the iommu_ops with the core.
 */
static int __init msm_iommu_init(void)
{
	if (!msm_soc_version_supports_iommu())
		return -ENODEV;

	setup_iommu_tex_classes();
	register_iommu(&msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");