/* Copyright (c) 2012 Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/interrupt.h>
20#include <linux/list.h>
21#include <linux/mutex.h>
22#include <linux/slab.h>
23#include <linux/iommu.h>
24#include <linux/clk.h>
25#include <linux/scatterlist.h>
26
27#include <asm/sizes.h>
28
29#include <mach/iommu_hw-v2.h>
30#include <mach/iommu.h>
31
32#include "msm_iommu_pagetable.h"
33
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/* Serializes all domain/context state in this file (attach lists,
 * page-table updates, register programming and the fault handler). */
static DEFINE_MUTEX(msm_iommu_lock);

/* Per-iommu_domain private data, stored in domain->priv. */
struct msm_priv {
	struct iommu_pt pt;		/* first/second-level page tables */
	struct list_head list_attached;	/* ctx_drvdata attached to this domain */
};
43
44static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
45{
46 int ret;
47
48 ret = clk_prepare_enable(drvdata->pclk);
49 if (ret)
50 goto fail;
51
52 if (drvdata->clk) {
53 ret = clk_prepare_enable(drvdata->clk);
54 if (ret)
55 clk_disable_unprepare(drvdata->pclk);
56 }
57fail:
58 return ret;
59}
60
61static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
62{
63 if (drvdata->clk)
64 clk_disable_unprepare(drvdata->clk);
65 clk_disable_unprepare(drvdata->pclk);
66}
67
/*
 * __flush_iotlb_va() - invalidate one VA's TLB entry in every context
 * attached to @domain.
 *
 * Each attached context's ASID is read back from its CONTEXTIDR register
 * and combined with the VA so that only this address space's entry is
 * invalidated (TLBIVA).  Caller must hold msm_iommu_lock.
 *
 * NOTE(review): no __enable_clocks() around these register accesses —
 * presumably the clocks are already on here; confirm against callers.
 *
 * Return: always 0.
 */
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		/* An attached context must have a platform device with a parent
		 * (the IOMMU itself) carrying the register base. */
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		asid = GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   asid | (va & CB_TLBIVA_VA));
		mb();	/* ensure the invalidation write reaches the hardware */
	}

	return 0;
}
91
/*
 * __flush_iotlb() - invalidate all TLB entries for @domain's address
 * space in every attached context.
 *
 * Uses TLBIASID (invalidate-by-ASID), so entries belonging to other
 * address spaces sharing the same context bank hardware are untouched.
 * Caller must hold msm_iommu_lock.
 *
 * NOTE(review): as with __flush_iotlb_va(), clocks are not enabled
 * here — confirm callers guarantee they are running.
 *
 * Return: always 0.
 */
static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		asid = GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
		mb();	/* ensure the invalidation write reaches the hardware */
	}

	return 0;
}
114
/*
 * __reset_context() - put context bank @ctx back into a known, disabled
 * state by zeroing its configuration registers (SCTLR=0 disables the
 * translation) and issuing a full TLB invalidate for the bank.
 * Caller must ensure the IOMMU clocks are enabled.
 */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_ACTLR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_TLBIALL(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	mb();	/* complete all register writes before returning */
}
130
/*
 * __program_context() - configure context bank @ctx to translate through
 * the page table at physical address @pgtable, then enable it.
 * @base:     IOMMU register base (clocks must be on).
 * @ctx:      context bank number to program.
 * @ncb:      total number of context banks on this IOMMU (for ASID scan).
 * @pgtable:  physical address of the first-level page table.
 * @redirect: nonzero to mark the walk as cacheable/shareable
 *            (L2-redirected page-table walks).
 *
 * An ASID is reused if another context bank already points at the same
 * page table; otherwise the first ASID value not used by any other bank
 * is claimed.  The MMU enable (SCTLR.M) is the last write.
 */
static void __program_context(void __iomem *base, int ctx, int ncb,
				phys_addr_t pgtable, int redirect)
{
	unsigned int prrr, nmrr;
	unsigned int pn;
	int i, j, found;

	/* Start from a clean, disabled bank. */
	__reset_context(base, ctx);

	pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
	SET_TTBCR(base, ctx, 0);
	SET_CB_TTBR0_ADDR(base, ctx, pn);

	/* Enable context fault interrupt */
	SET_CB_SCTLR_CFIE(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_CB_ACTLR_BPRCISH(base, ctx, 1);
	SET_CB_ACTLR_BPRCOSH(base, ctx, 1);
	SET_CB_ACTLR_BPRCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_CB_SCTLR_TRE(base, ctx, 1);

	/* Enable private ASID namespace */
	SET_CB_SCTLR_ASIDPNE(base, ctx, 1);

	/* Set TEX remap attributes (copied from the CPU's PRRR/NMRR) */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_CB_TTBR0_S(base, ctx, 1);
		SET_CB_TTBR0_NOS(base, ctx, 1);
		SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
		SET_CB_TTBR0_IRGN0(base, ctx, 1);
		SET_CB_TTBR0_RGN(base, ctx, 1);   /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if ((GET_CB_TTBR0_ADDR(base, i) == pn) && (i != ctx)) {
			SET_CB_CONTEXTIDR_ASID(base, ctx, \
					GET_CB_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID: try each candidate
	 * value i and take the first one no other bank is using. */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CB_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CB_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		/* With ncb candidates for ncb banks, one must be free. */
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_CB_SCTLR_M(base, ctx, 1);
	mb();	/* complete programming before translations may start */
}
207
208static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
209{
210 struct msm_priv *priv;
211
212 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
213 if (!priv)
214 goto fail_nomem;
215
216#ifdef CONFIG_IOMMU_PGTABLES_L2
217 priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
218#endif
219
220 INIT_LIST_HEAD(&priv->list_attached);
221 if (msm_iommu_pagetable_alloc(&priv->pt))
222 goto fail_nomem;
223
224 domain->priv = priv;
225 return 0;
226
227fail_nomem:
228 kfree(priv);
229 return -ENOMEM;
230}
231
232static void msm_iommu_domain_destroy(struct iommu_domain *domain)
233{
234 struct msm_priv *priv;
235
236 mutex_lock(&msm_iommu_lock);
237 priv = domain->priv;
238 domain->priv = NULL;
239
240 if (priv)
241 msm_iommu_pagetable_free(&priv->pt);
242
243 kfree(priv);
244 mutex_unlock(&msm_iommu_lock);
245}
246
/*
 * msm_iommu_attach_dev() - attach an IOMMU context device to @domain.
 *
 * @dev is a context device whose parent is the IOMMU platform device;
 * both must carry drvdata.  The context bank is programmed (under
 * enabled clocks) to translate through this domain's page table, then
 * recorded on the domain's attach list.
 *
 * Return: 0 on success, -EINVAL on missing state, -EBUSY if the context
 * is already attached (to any domain, or twice to this one), or a clock
 * enable error.
 */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto fail;
	}

	/* A non-empty list node means this context is attached somewhere. */
	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	/* Guard against a double attach to this same domain. */
	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_drvdata->num,
		iommu_drvdata->ncb, __pa(priv->pt.fl_table),
		priv->pt.redirect);

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ctx_drvdata->attached_domain = domain;

fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
297
/*
 * msm_iommu_detach_dev() - detach a context device from @domain.
 *
 * Invalidates the context's TLB entries by ASID, resets the context
 * bank to its disabled state (under enabled clocks), and removes the
 * context from the domain's attach list.  All failures (missing state,
 * clock errors) are silently ignored since the interface returns void.
 */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Drop this address space's TLB entries before disabling the bank. */
	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
		GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));

	__reset_context(iommu_drvdata->base, ctx_drvdata->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;

fail:
	mutex_unlock(&msm_iommu_lock);
}
331
332static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
333 phys_addr_t pa, size_t len, int prot)
334{
335 struct msm_priv *priv;
336 int ret = 0;
337
338 mutex_lock(&msm_iommu_lock);
339
340 priv = domain->priv;
341 if (!priv) {
342 ret = -EINVAL;
343 goto fail;
344 }
345
346 ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot);
347 if (ret)
348 goto fail;
349
350 ret = __flush_iotlb_va(domain, va);
351fail:
352 mutex_unlock(&msm_iommu_lock);
353 return ret;
354}
355
356static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
357 size_t len)
358{
359 struct msm_priv *priv;
360 int ret = -ENODEV;
361
362 mutex_lock(&msm_iommu_lock);
363
364 priv = domain->priv;
365 if (!priv)
366 goto fail;
367
368 ret = msm_iommu_pagetable_unmap(&priv->pt, va, len);
369 if (ret < 0)
370 goto fail;
371
372 ret = __flush_iotlb_va(domain, va);
373fail:
374 mutex_unlock(&msm_iommu_lock);
375
376 /* the IOMMU API requires us to return how many bytes were unmapped */
377 len = ret ? 0 : len;
378 return len;
379}
380
381static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
382 struct scatterlist *sg, unsigned int len,
383 int prot)
384{
385 int ret;
386 struct msm_priv *priv;
387
388 mutex_lock(&msm_iommu_lock);
389
390 priv = domain->priv;
391 if (!priv) {
392 ret = -EINVAL;
393 goto fail;
394 }
395
396 ret = msm_iommu_pagetable_map_range(&priv->pt, va, sg, len, prot);
397 if (ret)
398 goto fail;
399
400 __flush_iotlb(domain);
401fail:
402 mutex_unlock(&msm_iommu_lock);
403 return ret;
404}
405
406
407static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
408 unsigned int len)
409{
410 struct msm_priv *priv;
411
412 mutex_lock(&msm_iommu_lock);
413
414 priv = domain->priv;
415 msm_iommu_pagetable_unmap_range(&priv->pt, va, len);
416
417 __flush_iotlb(domain);
418 mutex_unlock(&msm_iommu_lock);
419 return 0;
420}
421
/*
 * msm_iommu_iova_to_phys() - translate IOVA @va to a physical address
 * using the hardware's address translation operation (ATS1PR) on the
 * first context attached to @domain.
 *
 * NOTE(review): the ATSR poll below has no timeout, and the clocks are
 * not explicitly enabled around these register accesses — confirm the
 * callers guarantee both.
 *
 * Return: the physical address, or 0 if nothing is attached or the
 * translation faulted (PAR.F set).
 */
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	void __iomem *base;
	phys_addr_t pa = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	/* Any attached context can perform the walk; use the first. */
	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	/* Kick off the translation, then wait for it to complete. */
	SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
	mb();
	while (GET_CB_ATSR_ACTIVE(base, ctx))
		cpu_relax();

	/* PAR holds the result: fault flag, or the translated address. */
	par = GET_PAR(base, ctx);
	if (par & CB_PAR_F) {
		pa = 0;
	} else {
		/* We are dealing with a supersection */
		if (par & CB_PAR_SS)
			pa = (par & 0xFF000000) | (va & 0x00FFFFFF);
		else /* Upper 20 bits from PAR, lower 12 from VA */
			pa = (par & 0xFFFFF000) | (va & 0x00000FFF);
	}

fail:
	mutex_unlock(&msm_iommu_lock);
	return pa;
}
466
/*
 * msm_iommu_domain_has_cap() - report optional IOMMU capabilities.
 * This driver advertises none, so every query answers 0 (unsupported).
 */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}
472
/*
 * print_ctx_regs() - dump a context bank's fault-related registers to
 * the kernel log, decoding the FSR status bits into mnemonic flags
 * (translation fault, access flag fault, permission fault, etc.).
 * Called from the fault handler for unexpected faults; caller must
 * ensure the registers are accessible (clocks on).
 */
static void print_ctx_regs(void __iomem *base, int ctx, unsigned int fsr)
{
	pr_err("FAR    = %08x    PAR    = %08x\n",
		GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "PF " : "",
			(fsr & 0x10) ? "EF " : "",
			(fsr & 0x20) ? "TLBMCF " : "",
			(fsr & 0x40) ? "TLBLKF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
		 GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
		 GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
		 GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
		 GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
497
/*
 * msm_iommu_fault_handler_v2() - context fault interrupt handler.
 * @dev_id: the context's platform device.
 *
 * Reads FSR; if a fault is pending, reports it through the IOMMU fault
 * notification API.  If no one handles it (-ENOSYS), the fault details
 * and context registers are dumped.  The fault status is then cleared
 * by writing FSR back.
 *
 * NOTE(review): this takes a mutex and so cannot run in hard-IRQ
 * context — presumably registered as a threaded handler; confirm at the
 * request_irq() site.  Also note `ret` temporarily holds -ENOSYS (an
 * errno) before being normalized to an irqreturn_t value.
 *
 * Return: IRQ_HANDLED if a fault was pending, IRQ_NONE otherwise.
 */
irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct msm_iommu_drvdata *drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int fsr;
	int ret = IRQ_NONE;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(!pdev);

	drvdata = dev_get_drvdata(pdev->dev.parent);
	BUG_ON(!drvdata);

	ctx_drvdata = dev_get_drvdata(&pdev->dev);
	BUG_ON(!ctx_drvdata);

	fsr = GET_FSR(drvdata->base, ctx_drvdata->num);
	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
				&ctx_drvdata->pdev->dev,
				GET_FAR(drvdata->base, ctx_drvdata->num), 0);

		/* Nobody consumed the fault: log everything we know. */
		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name    = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name,
							ctx_drvdata->num);
			pr_err("Interesting registers:\n");
			print_ctx_regs(drvdata->base, ctx_drvdata->num, fsr);
		}

		/* Write-to-clear the fault status. */
		SET_FSR(drvdata->base, ctx_drvdata->num, fsr);
		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	mutex_unlock(&msm_iommu_lock);
	return ret;
}
543
544static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
545{
546 struct msm_priv *priv = domain->priv;
547 return __pa(priv->pt.fl_table);
548}
549
/* IOMMU API operations for the MSM SMMU v2, registered on the
 * platform bus from msm_iommu_init(). */
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
564
/*
 * msm_iommu_init() - one-time driver setup: initialize the page-table
 * allocator and register our iommu_ops for all platform-bus devices.
 */
static int __init msm_iommu_init(void)
{
	msm_iommu_pagetable_init();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}
571
/* Run at subsys initcall time so the IOMMU is ready before the client
 * device drivers that depend on it probe. */
subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU v2 Driver");