blob: e17e1f8f94a24fcfd225b409605d5f14d446c9c4 [file] [log] [blame]
Steve Mucklef132c6c2012-06-06 18:30:57 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070011 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/interrupt.h>
20#include <linux/list.h>
21#include <linux/spinlock.h>
22#include <linux/slab.h>
23#include <linux/iommu.h>
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080024#include <linux/clk.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070025#include <linux/scatterlist.h>
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070026
27#include <asm/cacheflush.h>
28#include <asm/sizes.h>
29
30#include <mach/iommu_hw-8xxx.h>
31#include <mach/iommu.h>
32
Stepan Moskovchenko100832c2010-11-15 18:20:08 -080033#define MRC(reg, processor, op1, crn, crm, op2) \
34__asm__ __volatile__ ( \
35" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
36: "=r" (reg))
37
38#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
39#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
40
Steve Mucklef132c6c2012-06-06 18:30:57 -070041/* Sharability attributes of MSM IOMMU mappings */
42#define MSM_IOMMU_ATTR_NON_SH 0x0
43#define MSM_IOMMU_ATTR_SH 0x4
44
45/* Cacheability attributes of MSM IOMMU mappings */
46#define MSM_IOMMU_ATTR_NONCACHED 0x0
47#define MSM_IOMMU_ATTR_CACHED_WB_WA 0x1
48#define MSM_IOMMU_ATTR_CACHED_WB_NWA 0x2
49#define MSM_IOMMU_ATTR_CACHED_WT 0x3
50
51
/*
 * Write back a range of first/second-level PTEs from the CPU data cache
 * so the IOMMU hardware table walker observes them in memory.
 *
 * When @redirect is set the page tables are walked through the L2 cache
 * (see __program_context()), so no maintenance is required.
 */
static inline void clean_pte(unsigned long *start, unsigned long *end,
			     int redirect)
{
	if (redirect)
		return;

	dmac_flush_range(start, end);
}
58
Ohad Ben-Cohen83427272011-11-10 11:32:28 +020059/* bitmap of the page sizes currently supported */
60#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
61
Stepan Moskovchenko100832c2010-11-15 18:20:08 -080062static int msm_iommu_tex_class[4];
63
Steve Mucklef132c6c2012-06-06 18:30:57 -070064DEFINE_MUTEX(msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070065
/* Per-domain private data, hung off iommu_domain::priv. */
struct msm_priv {
	unsigned long *pgtable;		/* 16K first-level page table (VA) */
	int redirect;			/* non-zero: page tables are L2-cacheable,
					 * so clean_pte() becomes a no-op */
	struct list_head list_attached;	/* attached msm_iommu_ctx_drvdata */
};
71
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080072static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
73{
74 int ret;
75
Steve Mucklef132c6c2012-06-06 18:30:57 -070076 ret = clk_prepare_enable(drvdata->pclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080077 if (ret)
78 goto fail;
79
80 if (drvdata->clk) {
Steve Mucklef132c6c2012-06-06 18:30:57 -070081 ret = clk_prepare_enable(drvdata->clk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080082 if (ret)
Steve Mucklef132c6c2012-06-06 18:30:57 -070083 clk_disable_unprepare(drvdata->pclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080084 }
85fail:
86 return ret;
87}
88
/*
 * Undo __enable_clocks(): drop the optional core clock first, then the
 * interface clock (reverse of the enable order).
 */
static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->clk)
		clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}
95
96static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
97{
98 struct msm_priv *priv = domain->priv;
99 struct msm_iommu_drvdata *iommu_drvdata;
100 struct msm_iommu_ctx_drvdata *ctx_drvdata;
101 int ret = 0;
102 int asid;
103
104 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
105 if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
106 BUG();
107
108 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
109 if (!iommu_drvdata)
110 BUG();
111
112 ret = __enable_clocks(iommu_drvdata);
113 if (ret)
114 goto fail;
115
116 asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
117 ctx_drvdata->num);
118
119 SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
120 asid | (va & TLBIVA_VA));
121 mb();
122 __disable_clocks(iommu_drvdata);
123 }
124fail:
125 return ret;
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800126}
127
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800128static int __flush_iotlb(struct iommu_domain *domain)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700129{
130 struct msm_priv *priv = domain->priv;
131 struct msm_iommu_drvdata *iommu_drvdata;
132 struct msm_iommu_ctx_drvdata *ctx_drvdata;
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800133 int ret = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700134 int asid;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700135
136 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
137 if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
138 BUG();
139
140 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700141 if (!iommu_drvdata)
142 BUG();
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800143
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800144 ret = __enable_clocks(iommu_drvdata);
145 if (ret)
146 goto fail;
147
Steve Mucklef132c6c2012-06-06 18:30:57 -0700148 asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
149 ctx_drvdata->num);
150
151 SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
152 mb();
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800153 __disable_clocks(iommu_drvdata);
154 }
155fail:
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800156 return ret;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700157}
158
/*
 * Return context bank @ctx to its power-on state: clear barrier and
 * prefetch configuration, control/status registers, translation table
 * base/control registers and TEX-remap registers, then issue a memory
 * barrier so all register writes are posted before the context is
 * reprogrammed or left idle.
 */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();
}
182
/*
 * Fully program context bank @ctx of the IOMMU at @base to translate
 * through the first-level table at physical address @pgtable, then
 * enable the MMU for that context.
 *
 * @ncb:        total number of context banks on this IOMMU (bounds the
 *              ASID search below)
 * @redirect:   non-zero to make hardware table walks L2-cacheable
 * @ttbr_split: TTBCR split value; non-zero also programs TTBR1
 *              (NOTE(review): both TTBRs point at the same @pgtable,
 *              only shifted differently — presumably intentional for
 *              the split-table scheme; confirm against the HW spec)
 */
static void __program_context(void __iomem *base, int ctx, int ncb,
			      phys_addr_t pgtable, int redirect,
			      int ttbr_split)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, ttbr_split);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));
	if (ttbr_split)
		SET_TTBR1_PA(base, ctx, (pgtable >> TTBR1_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes: mirror the CPU's PRRR/NMRR so memory
	 * types seen through the IOMMU match the kernel's mappings. */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx, \
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID: try each candidate
	 * value i in turn and take the first one no other context uses.
	 */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		/* With at most ncb contexts there must be a free ASID in
		 * [0, ncb); still "found" here means the search failed. */
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();
}
279
Steve Mucklef132c6c2012-06-06 18:30:57 -0700280static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700281{
282 struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
283
284 if (!priv)
285 goto fail_nomem;
286
287 INIT_LIST_HEAD(&priv->list_attached);
288 priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
289 get_order(SZ_16K));
290
291 if (!priv->pgtable)
292 goto fail_nomem;
293
Steve Mucklef132c6c2012-06-06 18:30:57 -0700294#ifdef CONFIG_IOMMU_PGTABLES_L2
295 priv->redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
296#endif
297
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700298 memset(priv->pgtable, 0, SZ_16K);
299 domain->priv = priv;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700300
301 clean_pte(priv->pgtable, priv->pgtable + NUM_FL_PTE, priv->redirect);
302
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700303 return 0;
304
305fail_nomem:
306 kfree(priv);
307 return -ENOMEM;
308}
309
310static void msm_iommu_domain_destroy(struct iommu_domain *domain)
311{
312 struct msm_priv *priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700313 unsigned long *fl_table;
314 int i;
315
Steve Mucklef132c6c2012-06-06 18:30:57 -0700316 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700317 priv = domain->priv;
318 domain->priv = NULL;
319
320 if (priv) {
321 fl_table = priv->pgtable;
322
323 for (i = 0; i < NUM_FL_PTE; i++)
324 if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
325 free_page((unsigned long) __va(((fl_table[i]) &
326 FL_BASE_MASK)));
327
328 free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
329 priv->pgtable = NULL;
330 }
331
332 kfree(priv);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700333 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700334}
335
336static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
337{
338 struct msm_priv *priv;
339 struct msm_iommu_ctx_dev *ctx_dev;
340 struct msm_iommu_drvdata *iommu_drvdata;
341 struct msm_iommu_ctx_drvdata *ctx_drvdata;
342 struct msm_iommu_ctx_drvdata *tmp_drvdata;
343 int ret = 0;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700344
Steve Mucklef132c6c2012-06-06 18:30:57 -0700345 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700346
347 priv = domain->priv;
348
349 if (!priv || !dev) {
350 ret = -EINVAL;
351 goto fail;
352 }
353
354 iommu_drvdata = dev_get_drvdata(dev->parent);
355 ctx_drvdata = dev_get_drvdata(dev);
356 ctx_dev = dev->platform_data;
357
358 if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
359 ret = -EINVAL;
360 goto fail;
361 }
362
Stepan Moskovchenko00d4b2b2010-11-12 19:29:56 -0800363 if (!list_empty(&ctx_drvdata->attached_elm)) {
364 ret = -EBUSY;
365 goto fail;
366 }
367
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700368 list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
369 if (tmp_drvdata == ctx_drvdata) {
370 ret = -EBUSY;
371 goto fail;
372 }
373
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800374 ret = __enable_clocks(iommu_drvdata);
375 if (ret)
376 goto fail;
377
Steve Mucklef132c6c2012-06-06 18:30:57 -0700378 __program_context(iommu_drvdata->base, ctx_dev->num, iommu_drvdata->ncb,
379 __pa(priv->pgtable), priv->redirect,
380 iommu_drvdata->ttbr_split);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700381
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800382 __disable_clocks(iommu_drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700383 list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700384
Steve Mucklef132c6c2012-06-06 18:30:57 -0700385 ctx_drvdata->attached_domain = domain;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700386fail:
Steve Mucklef132c6c2012-06-06 18:30:57 -0700387 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700388 return ret;
389}
390
391static void msm_iommu_detach_dev(struct iommu_domain *domain,
392 struct device *dev)
393{
394 struct msm_priv *priv;
395 struct msm_iommu_ctx_dev *ctx_dev;
396 struct msm_iommu_drvdata *iommu_drvdata;
397 struct msm_iommu_ctx_drvdata *ctx_drvdata;
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800398 int ret;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700399
Steve Mucklef132c6c2012-06-06 18:30:57 -0700400 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700401 priv = domain->priv;
402
403 if (!priv || !dev)
404 goto fail;
405
406 iommu_drvdata = dev_get_drvdata(dev->parent);
407 ctx_drvdata = dev_get_drvdata(dev);
408 ctx_dev = dev->platform_data;
409
410 if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
411 goto fail;
412
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800413 ret = __enable_clocks(iommu_drvdata);
414 if (ret)
415 goto fail;
416
Steve Mucklef132c6c2012-06-06 18:30:57 -0700417 SET_TLBIASID(iommu_drvdata->base, ctx_dev->num,
418 GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_dev->num));
419
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700420 __reset_context(iommu_drvdata->base, ctx_dev->num);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800421 __disable_clocks(iommu_drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700422 list_del_init(&ctx_drvdata->attached_elm);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700423 ctx_drvdata->attached_domain = NULL;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700424fail:
Steve Mucklef132c6c2012-06-06 18:30:57 -0700425 mutex_unlock(&msm_iommu_lock);
426}
427
428static int __get_pgprot(int prot, int len)
429{
430 unsigned int pgprot;
431 int tex;
432
433 if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
434 prot |= IOMMU_READ | IOMMU_WRITE;
435 WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
436 }
437
438 if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
439 prot |= IOMMU_READ;
440 WARN_ONCE(1, "Write-only iommu mappings unsupported; falling back to RW\n");
441 }
442
443 if (prot & IOMMU_CACHE)
444 tex = (pgprot_kernel >> 2) & 0x07;
445 else
446 tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];
447
448 if (tex < 0 || tex > NUM_TEX_CLASS - 1)
449 return 0;
450
451 if (len == SZ_16M || len == SZ_1M) {
452 pgprot = FL_SHARED;
453 pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
454 pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
455 pgprot |= tex & 0x04 ? FL_TEX0 : 0;
456 pgprot |= FL_AP0 | FL_AP1;
457 pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
458 } else {
459 pgprot = SL_SHARED;
460 pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
461 pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
462 pgprot |= tex & 0x04 ? SL_TEX0 : 0;
463 pgprot |= SL_AP0 | SL_AP1;
464 pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
465 }
466
467 return pgprot;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700468}
469
470static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +0200471 phys_addr_t pa, size_t len, int prot)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700472{
473 struct msm_priv *priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700474 unsigned long *fl_table;
475 unsigned long *fl_pte;
476 unsigned long fl_offset;
477 unsigned long *sl_table;
478 unsigned long *sl_pte;
479 unsigned long sl_offset;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800480 unsigned int pgprot;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700481 int ret = 0;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700482
Steve Mucklef132c6c2012-06-06 18:30:57 -0700483 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800484
485 priv = domain->priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700486 if (!priv) {
487 ret = -EINVAL;
488 goto fail;
489 }
490
491 fl_table = priv->pgtable;
492
493 if (len != SZ_16M && len != SZ_1M &&
494 len != SZ_64K && len != SZ_4K) {
495 pr_debug("Bad size: %d\n", len);
496 ret = -EINVAL;
497 goto fail;
498 }
499
500 if (!fl_table) {
501 pr_debug("Null page table\n");
502 ret = -EINVAL;
503 goto fail;
504 }
505
Steve Mucklef132c6c2012-06-06 18:30:57 -0700506 pgprot = __get_pgprot(prot, len);
507
508 if (!pgprot) {
509 ret = -EINVAL;
510 goto fail;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800511 }
512
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700513 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
514 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
515
516 if (len == SZ_16M) {
517 int i = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700518
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700519 for (i = 0; i < 16; i++)
Steve Mucklef132c6c2012-06-06 18:30:57 -0700520 if (*(fl_pte+i)) {
521 ret = -EBUSY;
522 goto fail;
523 }
524
525 for (i = 0; i < 16; i++)
526 *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
527 | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
528 clean_pte(fl_pte, fl_pte + 16, priv->redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700529 }
530
Steve Mucklef132c6c2012-06-06 18:30:57 -0700531 if (len == SZ_1M) {
532 if (*fl_pte) {
533 ret = -EBUSY;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700534 goto fail;
535 }
536
Steve Mucklef132c6c2012-06-06 18:30:57 -0700537 *fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
538 | pgprot;
539 clean_pte(fl_pte, fl_pte + 1, priv->redirect);
540 }
541
542 /* Need a 2nd level table */
543 if (len == SZ_4K || len == SZ_64K) {
544
545 if (*fl_pte == 0) {
546 unsigned long *sl;
547 sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
548 get_order(SZ_4K));
549
550 if (!sl) {
551 pr_debug("Could not allocate second level table\n");
552 ret = -ENOMEM;
553 goto fail;
554 }
555 memset(sl, 0, SZ_4K);
556 clean_pte(sl, sl + NUM_SL_PTE, priv->redirect);
557
558 *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
559 FL_TYPE_TABLE);
560
561 clean_pte(fl_pte, fl_pte + 1, priv->redirect);
562 }
563
564 if (!(*fl_pte & FL_TYPE_TABLE)) {
565 ret = -EBUSY;
566 goto fail;
567 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700568 }
569
570 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
571 sl_offset = SL_OFFSET(va);
572 sl_pte = sl_table + sl_offset;
573
Steve Mucklef132c6c2012-06-06 18:30:57 -0700574 if (len == SZ_4K) {
575 if (*sl_pte) {
576 ret = -EBUSY;
577 goto fail;
578 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700579
Steve Mucklef132c6c2012-06-06 18:30:57 -0700580 *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
581 | SL_TYPE_SMALL | pgprot;
582 clean_pte(sl_pte, sl_pte + 1, priv->redirect);
583 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700584
585 if (len == SZ_64K) {
586 int i;
587
588 for (i = 0; i < 16; i++)
Steve Mucklef132c6c2012-06-06 18:30:57 -0700589 if (*(sl_pte+i)) {
590 ret = -EBUSY;
591 goto fail;
592 }
593
594 for (i = 0; i < 16; i++)
595 *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
596 | SL_SHARED | SL_TYPE_LARGE | pgprot;
597
598 clean_pte(sl_pte, sl_pte + 16, priv->redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700599 }
600
Steve Mucklef132c6c2012-06-06 18:30:57 -0700601 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700602fail:
Steve Mucklef132c6c2012-06-06 18:30:57 -0700603 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700604 return ret;
605}
606
Ohad Ben-Cohen50090652011-11-10 11:32:25 +0200607static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
608 size_t len)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700609{
610 struct msm_priv *priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700611 unsigned long *fl_table;
612 unsigned long *fl_pte;
613 unsigned long fl_offset;
614 unsigned long *sl_table;
615 unsigned long *sl_pte;
616 unsigned long sl_offset;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700617 int i, ret = 0;
618
Steve Mucklef132c6c2012-06-06 18:30:57 -0700619 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700620
621 priv = domain->priv;
622
Joerg Roedel05df1f32012-01-26 18:25:37 +0100623 if (!priv)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700624 goto fail;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700625
626 fl_table = priv->pgtable;
627
628 if (len != SZ_16M && len != SZ_1M &&
629 len != SZ_64K && len != SZ_4K) {
630 pr_debug("Bad length: %d\n", len);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700631 goto fail;
632 }
633
634 if (!fl_table) {
635 pr_debug("Null page table\n");
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700636 goto fail;
637 }
638
639 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
640 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
641
642 if (*fl_pte == 0) {
643 pr_debug("First level PTE is 0\n");
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700644 goto fail;
645 }
646
647 /* Unmap supersection */
Steve Mucklef132c6c2012-06-06 18:30:57 -0700648 if (len == SZ_16M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700649 for (i = 0; i < 16; i++)
650 *(fl_pte+i) = 0;
651
Steve Mucklef132c6c2012-06-06 18:30:57 -0700652 clean_pte(fl_pte, fl_pte + 16, priv->redirect);
653 }
654
655 if (len == SZ_1M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700656 *fl_pte = 0;
657
Steve Mucklef132c6c2012-06-06 18:30:57 -0700658 clean_pte(fl_pte, fl_pte + 1, priv->redirect);
659 }
660
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700661 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
662 sl_offset = SL_OFFSET(va);
663 sl_pte = sl_table + sl_offset;
664
665 if (len == SZ_64K) {
666 for (i = 0; i < 16; i++)
667 *(sl_pte+i) = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700668
669 clean_pte(sl_pte, sl_pte + 16, priv->redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700670 }
671
Steve Mucklef132c6c2012-06-06 18:30:57 -0700672 if (len == SZ_4K) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700673 *sl_pte = 0;
674
Steve Mucklef132c6c2012-06-06 18:30:57 -0700675 clean_pte(sl_pte, sl_pte + 1, priv->redirect);
676 }
677
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700678 if (len == SZ_4K || len == SZ_64K) {
679 int used = 0;
680
681 for (i = 0; i < NUM_SL_PTE; i++)
682 if (sl_table[i])
683 used = 1;
684 if (!used) {
685 free_page((unsigned long)sl_table);
686 *fl_pte = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700687
688 clean_pte(fl_pte, fl_pte + 1, priv->redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700689 }
690 }
691
Steve Mucklef132c6c2012-06-06 18:30:57 -0700692 ret = __flush_iotlb_va(domain, va);
Ohad Ben-Cohen9e285472011-09-02 13:32:34 -0400693
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700694fail:
Steve Mucklef132c6c2012-06-06 18:30:57 -0700695 mutex_unlock(&msm_iommu_lock);
Ohad Ben-Cohen50090652011-11-10 11:32:25 +0200696
697 /* the IOMMU API requires us to return how many bytes were unmapped */
698 len = ret ? 0 : len;
699 return len;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700700}
701
/*
 * Physical address of a scatterlist entry.  The DMA address is tried
 * first so carveout regions that have no struct page can still be
 * mapped; otherwise fall back to the page-based physical address.
 */
static unsigned int get_phys_addr(struct scatterlist *sg)
{
	unsigned int pa = sg_dma_address(sg);

	return pa ? pa : sg_phys(sg);
}
714
/*
 * Map @len bytes (must be 4K-aligned; enforced by BUG_ON) of the
 * scatterlist @sg at IOMMU virtual address @va, entirely with 4K
 * second-level entries.  Second-level tables are allocated on demand
 * as the walk crosses 1M first-level boundaries, and only the PTEs
 * actually written are cache-cleaned.
 *
 * NOTE(review): unlike msm_iommu_map(), @domain->priv is not checked
 * for NULL before use — presumably callers guarantee an initialized
 * domain; confirm before exposing this path more widely.
 *
 * Returns 0, -EINVAL (bad prot, or an sg entry with no address) or
 * -ENOMEM.
 */
static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	unsigned int pa;
	unsigned int offset = 0;	/* bytes mapped so far */
	unsigned int pgprot;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_offset, sl_start;
	unsigned int chunk_offset = 0;	/* progress within current sg entry */
	unsigned int chunk_pa;
	int ret = 0;
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	pgprot = __get_pgprot(prot, SZ_4K);

	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);

	chunk_pa = get_phys_addr(sg);
	if (chunk_pa == 0) {
		pr_debug("No dma address for sg %p\n", sg);
		ret = -EINVAL;
		goto fail;
	}

	/* Outer loop: one iteration per 1M first-level slot */
	while (offset < len) {
		/* Set up a 2nd level page table if one doesn't exist */
		if (*fl_pte == 0) {
			sl_table = (unsigned long *)
				 __get_free_pages(GFP_KERNEL, get_order(SZ_4K));

			if (!sl_table) {
				pr_debug("Could not allocate second level table\n");
				ret = -ENOMEM;
				goto fail;
			}

			memset(sl_table, 0, SZ_4K);
			clean_pte(sl_table, sl_table + NUM_SL_PTE,
				  priv->redirect);

			*fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
							    FL_TYPE_TABLE);
			clean_pte(fl_pte, fl_pte + 1, priv->redirect);
		} else
			sl_table = (unsigned long *)
					       __va(((*fl_pte) & FL_BASE_MASK));

		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {
			pa = chunk_pa + chunk_offset;
			sl_table[sl_offset] = (pa & SL_BASE_MASK_SMALL) |
					      pgprot | SL_NG | SL_SHARED | SL_TYPE_SMALL;
			sl_offset++;
			offset += SZ_4K;

			chunk_offset += SZ_4K;

			/* Current sg entry exhausted: advance to the next */
			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				chunk_pa = get_phys_addr(sg);
				if (chunk_pa == 0) {
					pr_debug("No dma address for sg %p\n",
						 sg);
					ret = -EINVAL;
					goto fail;
				}
			}
		}

		/* Clean only the slice of PTEs written this iteration */
		clean_pte(sl_table + sl_start, sl_table + sl_offset,
			  priv->redirect);

		fl_pte++;
		sl_offset = 0;
	}
	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
821
822
/*
 * Unmap @len bytes (must be 4K-aligned; enforced by BUG_ON) starting at
 * IOMMU virtual address @va, walking one 1M first-level slot per outer
 * iteration.  Each second-level table is freed as soon as it holds no
 * live entries.
 *
 * NOTE(review): assumes the range was mapped with 4K second-level
 * entries (e.g. by msm_iommu_map_range); a section-mapped range would
 * be misinterpreted as a table pointer — confirm callers never mix the
 * two.  Always returns 0.
 */
static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;	/* bytes unmapped so far */
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_start = SL_OFFSET(va);

	while (offset < len) {
		sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
		/* Clamp this iteration to the end of the current SL table */
		sl_end = ((len - offset) / SZ_4K) + sl_start;

		if (sl_end > NUM_SL_PTE)
			sl_end = NUM_SL_PTE;

		memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
		clean_pte(sl_table + sl_start, sl_table + sl_end,
			  priv->redirect);

		offset += (sl_end - sl_start) * SZ_4K;

		/* Unmap and free the 2nd level table if all mappings in it
		 * were removed. This saves memory, but the table will need
		 * to be re-allocated the next time someone tries to map these
		 * VAs.
		 */
		used = 0;

		/* If we just unmapped the whole table, don't bother
		 * seeing if there are still used entries left.
		 */
		if (sl_end - sl_start != NUM_SL_PTE)
			for (i = 0; i < NUM_SL_PTE; i++)
				if (sl_table[i]) {
					used = 1;
					break;
				}
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			clean_pte(fl_pte, fl_pte + 1, priv->redirect);
		}

		sl_start = 0;
		fl_pte++;
	}

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}
891
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700892static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
893 unsigned long va)
894{
895 struct msm_priv *priv;
896 struct msm_iommu_drvdata *iommu_drvdata;
897 struct msm_iommu_ctx_drvdata *ctx_drvdata;
898 unsigned int par;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700899 void __iomem *base;
900 phys_addr_t ret = 0;
901 int ctx;
902
Steve Mucklef132c6c2012-06-06 18:30:57 -0700903 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700904
905 priv = domain->priv;
906 if (list_empty(&priv->list_attached))
907 goto fail;
908
909 ctx_drvdata = list_entry(priv->list_attached.next,
910 struct msm_iommu_ctx_drvdata, attached_elm);
911 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
912
913 base = iommu_drvdata->base;
914 ctx = ctx_drvdata->num;
915
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800916 ret = __enable_clocks(iommu_drvdata);
917 if (ret)
918 goto fail;
919
Stepan Moskovchenkob0e78082011-02-28 16:04:55 -0800920 SET_V2PPR(base, ctx, va & V2Pxx_VA);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700921
Steve Mucklef132c6c2012-06-06 18:30:57 -0700922 mb();
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700923 par = GET_PAR(base, ctx);
924
925 /* We are dealing with a supersection */
926 if (GET_NOFAULT_SS(base, ctx))
927 ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
928 else /* Upper 20 bits from PAR, lower 12 from VA */
929 ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
930
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800931 if (GET_FAULT(base, ctx))
932 ret = 0;
933
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800934 __disable_clocks(iommu_drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700935fail:
Steve Mucklef132c6c2012-06-06 18:30:57 -0700936 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700937 return ret;
938}
939
/* This driver advertises no optional IOMMU capabilities. */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}
945
/*
 * Dump the fault-related registers of context bank @ctx to the kernel
 * log, decoding each FSR status bit by name.  Called from the fault
 * handler; purely diagnostic, no side effects on the hardware state
 * beyond the register reads.
 */
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
972
973irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
974{
Steve Mucklef132c6c2012-06-06 18:30:57 -0700975 struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_id;
976 struct msm_iommu_drvdata *drvdata;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700977 void __iomem *base;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700978 unsigned int fsr, num;
979 int ret;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700980
Steve Mucklef132c6c2012-06-06 18:30:57 -0700981 mutex_lock(&msm_iommu_lock);
982 BUG_ON(!ctx_drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700983
Steve Mucklef132c6c2012-06-06 18:30:57 -0700984 drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
985 BUG_ON(!drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700986
987 base = drvdata->base;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700988 num = ctx_drvdata->num;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700989
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800990 ret = __enable_clocks(drvdata);
991 if (ret)
992 goto fail;
993
Steve Mucklef132c6c2012-06-06 18:30:57 -0700994 fsr = GET_FSR(base, num);
995
996 if (fsr) {
997 if (!ctx_drvdata->attached_domain) {
998 pr_err("Bad domain in interrupt handler\n");
999 ret = -ENOSYS;
1000 } else
1001 ret = report_iommu_fault(ctx_drvdata->attached_domain,
1002 &ctx_drvdata->pdev->dev,
1003 GET_FAR(base, num), 0);
1004
1005 if (ret == -ENOSYS) {
1006 pr_err("Unexpected IOMMU page fault!\n");
1007 pr_err("name = %s\n", drvdata->name);
1008 pr_err("context = %s (%d)\n", ctx_drvdata->name, num);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001009 pr_err("Interesting registers:\n");
Steve Mucklef132c6c2012-06-06 18:30:57 -07001010 print_ctx_regs(base, num);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001011 }
Steve Mucklef132c6c2012-06-06 18:30:57 -07001012
1013 SET_FSR(base, num, fsr);
1014 SET_RESUME(base, num, 1);
1015
1016 ret = IRQ_HANDLED;
1017 } else
1018 ret = IRQ_NONE;
1019
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -08001020 __disable_clocks(drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001021fail:
Steve Mucklef132c6c2012-06-06 18:30:57 -07001022 mutex_unlock(&msm_iommu_lock);
1023 return ret;
1024}
1025
1026static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
1027{
1028 struct msm_priv *priv = domain->priv;
1029 return __pa(priv->pgtable);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001030}
1031
/*
 * IOMMU API callbacks registered with the IOMMU core for the
 * platform bus (see msm_iommu_init below).
 */
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	/* Supported page sizes advertised to the IOMMU core */
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
1046
Stepan Moskovchenko100832c2010-11-15 18:20:08 -08001047static int __init get_tex_class(int icp, int ocp, int mt, int nos)
1048{
1049 int i = 0;
1050 unsigned int prrr = 0;
1051 unsigned int nmrr = 0;
1052 int c_icp, c_ocp, c_mt, c_nos;
1053
1054 RCP15_PRRR(prrr);
1055 RCP15_NMRR(nmrr);
1056
1057 for (i = 0; i < NUM_TEX_CLASS; i++) {
1058 c_nos = PRRR_NOS(prrr, i);
1059 c_mt = PRRR_MT(prrr, i);
1060 c_icp = NMRR_ICP(nmrr, i);
1061 c_ocp = NMRR_OCP(nmrr, i);
1062
1063 if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
1064 return i;
1065 }
1066
1067 return -ENODEV;
1068}
1069
/*
 * Populate the msm_iommu_tex_class lookup table, mapping each MSM
 * IOMMU cacheability attribute to the TEX remap class the CPU is
 * currently using for that combination of cache policies.
 *
 * NOTE(review): get_tex_class() can return -ENODEV when no remap class
 * matches, and that value is stored into the table unchecked —
 * presumably consumers tolerate or never hit it; verify at the lookup
 * sites before relying on these entries.
 */
static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}
1084
Stepan Moskovchenko516cbc72010-11-12 19:29:53 -08001085static int __init msm_iommu_init(void)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001086{
Steve Mucklef132c6c2012-06-06 18:30:57 -07001087 if (!msm_soc_version_supports_iommu_v1())
1088 return -ENODEV;
1089
Stepan Moskovchenko100832c2010-11-15 18:20:08 -08001090 setup_iommu_tex_classes();
Joerg Roedel85eebbc2011-09-06 17:56:07 +02001091 bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001092 return 0;
1093}
1094
1095subsys_initcall(msm_iommu_init);
1096
1097MODULE_LICENSE("GPL v2");
1098MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");