/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include <mach/iommu_hw-8xxx.h>
#include <mach/iommu.h>

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)

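/*
 * When the page tables are not configured as L2-cacheable
 * (CONFIG_IOMMU_PGTABLES_L2 unset), PTE updates made by the CPU must be
 * flushed out of the data cache so the IOMMU's hardware table walker sees
 * them in memory. With L2-cacheable tables, no flush is needed.
 */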
#ifndef CONFIG_IOMMU_PGTABLES_L2
static inline void clean_pte(unsigned long *start, unsigned long *end)
{
	dmac_flush_range(start, end);
}
#else
static inline void clean_pte(unsigned long *start, unsigned long *end) { }
#endif

static int msm_iommu_tex_class[4];

DEFINE_SPINLOCK(msm_iommu_lock);

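/* Per-domain driver state: the first-level page table and the list of
 * context banks currently attached to this domain.
 */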
struct msm_priv {
	unsigned long *pgtable;
	struct list_head list_attached;
};

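/* Enable the IOMMU clocks around register accesses; drvdata->clk is
 * optional, drvdata->pclk is not.
 */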
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_enable(drvdata->pclk);
	if (ret)
		goto fail;

	if (drvdata->clk) {
		ret = clk_enable(drvdata->clk);
		if (ret)
			clk_disable(drvdata->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->clk)
		clk_disable(drvdata->clk);
	clk_disable(drvdata->pclk);
}

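/* Invalidate the TLB of every context bank attached to this domain. */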
static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
		mb();
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

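/* Clear all registers of a context bank, leaving its MMU disabled. */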
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();
}

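/*
 * Program a context bank for a domain: enable hardware table walks from
 * the given page table, set up TEX remapping and cacheability attributes,
 * pick an ASID (re-using one if another context already uses the same
 * page table), and finally turn the MMU on.
 */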
static void __program_context(void __iomem *base, int ctx, int ncb,
			      phys_addr_t pgtable)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_TTBR0_SH(base, ctx, 1);
	SET_TTBR1_SH(base, ctx, 1);

	SET_TTBR0_NOS(base, ctx, 1);
	SET_TTBR1_NOS(base, ctx, 1);

	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR0_IRGNL(base, ctx, 1);

	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR1_IRGNL(base, ctx, 1);

	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx,
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();
}

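/* Allocate the 16 KB (4096-entry) ARM first-level page table for a new
 * domain.
 */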
static int msm_iommu_domain_init(struct iommu_domain *domain)
{
	struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
	priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
							  get_order(SZ_16K));

	if (!priv->pgtable)
		goto fail_nomem;

	memset(priv->pgtable, 0, SZ_16K);
	domain->priv = priv;
	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	int i;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

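/*
 * Attach a context bank device to a domain by programming it with the
 * domain's page table. A context bank can only be attached to one domain
 * at a time.
 */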
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_dev->num, iommu_drvdata->ncb,
			  __pa(priv->pgtable));

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ret = __flush_iotlb(domain);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __flush_iotlb(domain);
	if (ret)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

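/*
 * Create a mapping of the given size. Only the sizes supported by the ARM
 * short-descriptor format can be mapped: 4 KB and 64 KB pages at the
 * second level, 1 MB sections and 16 MB supersections at the first level.
 */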
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, int order, int prot)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	size_t len = 0x1000UL << order;
	int ret = 0, tex, sh;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
	tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
		ret = -EINVAL;
		goto fail;
	}

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = sh ? FL_SHARED : 0;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
	} else {
		pgprot = sh ? SL_SHARED : 0;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

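	/*
	 * A 16 MB supersection is represented by 16 identical, consecutive
	 * first-level entries (a 64 KB large page below likewise uses 16
	 * consecutive second-level entries), so check and write all of them.
	 */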
	if (len == SZ_16M) {
		int i = 0;

		for (i = 0; i < 16; i++)
			if (*(fl_pte+i)) {
				ret = -EBUSY;
				goto fail;
			}

		for (i = 0; i < 16; i++)
			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
				  FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
				  FL_SHARED | FL_NG | pgprot;

		clean_pte(fl_pte, fl_pte + 16);
	}

	if (len == SZ_1M) {
		if (*fl_pte) {
			ret = -EBUSY;
			goto fail;
		}

		*fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
					    FL_TYPE_SECT | FL_SHARED | pgprot;

		clean_pte(fl_pte, fl_pte + 1);
	}

	/* Need a 2nd level table */
	if (len == SZ_4K || len == SZ_64K) {

		if (*fl_pte == 0) {
			unsigned long *sl;
			sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
							get_order(SZ_4K));

			if (!sl) {
				pr_debug("Could not allocate second level table\n");
				ret = -ENOMEM;
				goto fail;
			}
			memset(sl, 0, SZ_4K);

			*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) |
							    FL_TYPE_TABLE);

			clean_pte(fl_pte, fl_pte + 1);
		}

		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K) {
		if (*sl_pte) {
			ret = -EBUSY;
			goto fail;
		}

		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
					  SL_SHARED | SL_TYPE_SMALL | pgprot;
		clean_pte(sl_pte, sl_pte + 1);
	}

	if (len == SZ_64K) {
		int i;

		for (i = 0; i < 16; i++)
			if (*(sl_pte+i)) {
				ret = -EBUSY;
				goto fail;
			}

		for (i = 0; i < 16; i++)
			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
			    SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;

		clean_pte(sl_pte, sl_pte + 16);
	}

	ret = __flush_iotlb(domain);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

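/*
 * Remove a mapping previously created by msm_iommu_map(). When the last
 * entry of a second-level table is cleared, the table itself is freed and
 * the first-level entry pointing to it is removed.
 */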
static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			    int order)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	size_t len = 0x1000UL << order;
	int i, ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv) {
		ret = -ENODEV;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		ret = -ENODEV;
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M) {
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

		clean_pte(fl_pte, fl_pte + 16);
	}

	if (len == SZ_1M) {
		*fl_pte = 0;

		clean_pte(fl_pte, fl_pte + 1);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;

		clean_pte(sl_pte, sl_pte + 16);
	}

	if (len == SZ_4K) {
		*sl_pte = 0;

		clean_pte(sl_pte, sl_pte + 1);
	}

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			clean_pte(fl_pte, fl_pte + 1);
		}
	}

	ret = __flush_iotlb(domain);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

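/*
 * Translate an IOVA by issuing a hardware V2P (virtual-to-physical) probe
 * on the first context bank attached to the domain, rather than by walking
 * the page table in software.
 */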
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	unsigned long flags;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(base, ctx, 0);
	mb();
	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	mb();
	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(base, ctx))
		ret = 0;

	__disable_clocks(iommu_drvdata);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

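/*
 * Context fault interrupt handler: report the registers of any context
 * bank that shows a fault, then clear its fault status.
 */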
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_drvdata *drvdata = dev_id;
	void __iomem *base;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!drvdata) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	base = drvdata->base;

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int) base);
	pr_err("name = %s\n", drvdata->name);

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	for (i = 0; i < drvdata->ncb; i++) {
		fsr = GET_FSR(base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, i);
			SET_FSR(base, i, 0x4000000F);
		}
	}
	__disable_clocks(drvdata);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap
};

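/*
 * Look up the TEX remap class whose PRRR/NMRR attributes match the
 * requested inner/outer cache policy, memory type and shareability.
 */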
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
	int i = 0;
	unsigned int prrr = 0;
	unsigned int nmrr = 0;
	int c_icp, c_ocp, c_mt, c_nos;

	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);

	for (i = 0; i < NUM_TEX_CLASS; i++) {
		c_nos = PRRR_NOS(prrr, i);
		c_mt = PRRR_MT(prrr, i);
		c_icp = NMRR_ICP(nmrr, i);
		c_ocp = NMRR_OCP(nmrr, i);

		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
			return i;
	}

	return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

static int __init msm_iommu_init(void)
{
	setup_iommu_tex_classes();
	register_iommu(&msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");