msm: iommu: Support page table cacheability attribute

Allow the cacheability attributes of the IOMMU page tables to be
specified on a per-domain basis instead of globally at compile time.
With CONFIG_IOMMU_PGTABLES_L2 enabled, a domain created with the
MSM_IOMMU_DOMAIN_PT_CACHEABLE flag sets priv->redirect, which programs
TTBR0/TTBR1 as inner/outer write-back, write-allocate and shareable,
and skips the explicit clean_pte() maintenance on page table updates.
Domains created without the flag keep non-cacheable page table walks
and continue to clean the PTEs with dmac_flush_range().
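
For reference, a minimal usage sketch (not part of this patch) of how
a client might request cacheable page tables.  The iommu_domain_alloc()
variant that forwards allocation flags to domain_init(), and the header
providing MSM_IOMMU_DOMAIN_PT_CACHEABLE, are assumptions about the
surrounding tree:

  #include <linux/err.h>
  #include <linux/iommu.h>
  #include <mach/iommu.h>	/* assumed home of MSM_IOMMU_DOMAIN_PT_CACHEABLE */

  static struct iommu_domain *example_cached_pt_domain(struct device *dev)
  {
  	struct iommu_domain *domain;

  	/* Assumes the msm tree's iommu_domain_alloc(flags) variant that
  	 * passes the flags through to the driver's domain_init().  With
  	 * the flag set (and CONFIG_IOMMU_PGTABLES_L2 enabled),
  	 * priv->redirect is nonzero: the TTBRs are programmed
  	 * write-back/write-allocate and clean_pte() is skipped on
  	 * map/unmap. */
  	domain = iommu_domain_alloc(MSM_IOMMU_DOMAIN_PT_CACHEABLE);
  	if (!domain)
  		return ERR_PTR(-ENOMEM);

  	if (iommu_attach_device(domain, dev)) {
  		iommu_domain_free(domain);
  		return ERR_PTR(-EIO);
  	}

  	return domain;
  }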

Change-Id: I03c4c1b08b03bbc5a1ba9931ed117518ff772ce5
Signed-off-by: Stepan Moskovchenko <stepanm@codeaurora.org>
diff --git a/arch/arm/mach-msm/iommu.c b/arch/arm/mach-msm/iommu.c
index 442a1b4..e874663 100644
--- a/arch/arm/mach-msm/iommu.c
+++ b/arch/arm/mach-msm/iommu.c
@@ -38,14 +38,10 @@
 #define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
 #define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
 
-#ifndef CONFIG_IOMMU_PGTABLES_L2
 static inline void clean_pte(unsigned long *start, unsigned long *end)
 {
 	dmac_flush_range(start, end);
 }
-#else
-static inline void clean_pte(unsigned long *start, unsigned long *end) { }
-#endif
 
 static int msm_iommu_tex_class[4];
 
@@ -53,6 +49,7 @@
 
 struct msm_priv {
 	unsigned long *pgtable;
+	int redirect;
 	struct list_head list_attached;
 };
 
@@ -168,7 +165,7 @@
 }
 
 static void __program_context(void __iomem *base, int ctx, int ncb,
-			      phys_addr_t pgtable)
+			      phys_addr_t pgtable, int redirect)
 {
 	unsigned int prrr, nmrr;
 	int i, j, found;
@@ -207,25 +204,25 @@
 	/* Turn on BFB prefetch */
 	SET_BFBDFE(base, ctx, 1);
 
-#ifdef CONFIG_IOMMU_PGTABLES_L2
 	/* Configure page tables as inner-cacheable and shareable to reduce
 	 * the TLB miss penalty.
 	 */
-	SET_TTBR0_SH(base, ctx, 1);
-	SET_TTBR1_SH(base, ctx, 1);
+	if (redirect) {
+		SET_TTBR0_SH(base, ctx, 1);
+		SET_TTBR1_SH(base, ctx, 1);
 
-	SET_TTBR0_NOS(base, ctx, 1);
-	SET_TTBR1_NOS(base, ctx, 1);
+		SET_TTBR0_NOS(base, ctx, 1);
+		SET_TTBR1_NOS(base, ctx, 1);
 
-	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
-	SET_TTBR0_IRGNL(base, ctx, 1);
+		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
+		SET_TTBR0_IRGNL(base, ctx, 1);
 
-	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
-	SET_TTBR1_IRGNL(base, ctx, 1);
+		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
+		SET_TTBR1_IRGNL(base, ctx, 1);
 
-	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
-	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
-#endif
+		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
+		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
+	}
 
 	/* Find if this page table is used elsewhere, and re-use ASID */
 	found = 0;
@@ -275,6 +272,10 @@
 	if (!priv->pgtable)
 		goto fail_nomem;
 
+#ifdef CONFIG_IOMMU_PGTABLES_L2
+	priv->redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
+#endif
+
 	memset(priv->pgtable, 0, SZ_16K);
 	domain->priv = priv;
 	return 0;
@@ -355,7 +356,7 @@
 		goto fail;
 
 	__program_context(iommu_drvdata->base, ctx_dev->num, iommu_drvdata->ncb,
-			  __pa(priv->pgtable));
+			  __pa(priv->pgtable), priv->redirect);
 
 	__disable_clocks(iommu_drvdata);
 	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
@@ -490,8 +491,8 @@
 			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
 				  FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
 				  FL_SHARED | FL_NG | pgprot;
-
-		clean_pte(fl_pte, fl_pte + 16);
+		if (!priv->redirect)
+			clean_pte(fl_pte, fl_pte + 16);
 	}
 
 	if (len == SZ_1M) {
@@ -502,8 +503,8 @@
 
 		*fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
 					    FL_TYPE_SECT | FL_SHARED | pgprot;
-
-		clean_pte(fl_pte, fl_pte + 1);
+		if (!priv->redirect)
+			clean_pte(fl_pte, fl_pte + 1);
 	}
 
 	/* Need a 2nd level table */
@@ -524,7 +525,8 @@
 			*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
 						      FL_TYPE_TABLE);
 
-			clean_pte(fl_pte, fl_pte + 1);
+			if (!priv->redirect)
+				clean_pte(fl_pte, fl_pte + 1);
 		}
 
 		if (!(*fl_pte & FL_TYPE_TABLE)) {
@@ -545,7 +547,8 @@
 
 		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
 					  SL_SHARED | SL_TYPE_SMALL | pgprot;
-		clean_pte(sl_pte, sl_pte + 1);
+		if (!priv->redirect)
+			clean_pte(sl_pte, sl_pte + 1);
 	}
 
 	if (len == SZ_64K) {
@@ -561,7 +564,8 @@
 			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
 			    SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
 
-		clean_pte(sl_pte, sl_pte + 16);
+		if (!priv->redirect)
+			clean_pte(sl_pte, sl_pte + 16);
 	}
 
 	ret = __flush_iotlb_va(domain, va);
@@ -622,13 +626,15 @@
 		for (i = 0; i < 16; i++)
 			*(fl_pte+i) = 0;
 
-		clean_pte(fl_pte, fl_pte + 16);
+		if (!priv->redirect)
+			clean_pte(fl_pte, fl_pte + 16);
 	}
 
 	if (len == SZ_1M) {
 		*fl_pte = 0;
 
-		clean_pte(fl_pte, fl_pte + 1);
+		if (!priv->redirect)
+			clean_pte(fl_pte, fl_pte + 1);
 	}
 
 	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
@@ -639,13 +645,15 @@
 		for (i = 0; i < 16; i++)
 			*(sl_pte+i) = 0;
 
-		clean_pte(sl_pte, sl_pte + 16);
+		if (!priv->redirect)
+			clean_pte(sl_pte, sl_pte + 16);
 	}
 
 	if (len == SZ_4K) {
 		*sl_pte = 0;
 
-		clean_pte(sl_pte, sl_pte + 1);
+		if (!priv->redirect)
+			clean_pte(sl_pte, sl_pte + 1);
 	}
 
 	if (len == SZ_4K || len == SZ_64K) {
@@ -658,7 +666,8 @@
 			free_page((unsigned long)sl_table);
 			*fl_pte = 0;
 
-			clean_pte(fl_pte, fl_pte + 1);
+			if (!priv->redirect)
+				clean_pte(fl_pte, fl_pte + 1);
 		}
 	}
 
@@ -723,7 +732,8 @@
 			memset(sl_table, 0, SZ_4K);
 			*fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
 							    FL_TYPE_TABLE);
-			clean_pte(fl_pte, fl_pte + 1);
+			if (!priv->redirect)
+				clean_pte(fl_pte, fl_pte + 1);
 		} else
 			sl_table = (unsigned long *)
 					       __va(((*fl_pte) & FL_BASE_MASK));
@@ -751,7 +761,8 @@
 			}
 		}
 
-		clean_pte(sl_table + sl_start, sl_table + sl_offset);
+		if (!priv->redirect)
+			clean_pte(sl_table + sl_start, sl_table + sl_offset);
 
 		fl_pte++;
 		sl_offset = 0;
@@ -796,7 +807,8 @@
 			sl_end = NUM_SL_PTE;
 
 		memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
-		clean_pte(sl_table + sl_start, sl_table + sl_end);
+		if (!priv->redirect)
+			clean_pte(sl_table + sl_start, sl_table + sl_end);
 
 		offset += (sl_end - sl_start) * SZ_4K;
 
@@ -820,7 +832,8 @@
 			free_page((unsigned long)sl_table);
 			*fl_pte = 0;
 
-			clean_pte(fl_pte, fl_pte + 1);
+			if (!priv->redirect)
+				clean_pte(fl_pte, fl_pte + 1);
 		}
 
 		sl_start = 0;