msm: iommu: Convert spinlocks to mutexes

Protect the IOMMU data structures with a mutex rather than a spinlock so
that the new clock APIs, which may sleep, can be called from within
lock-protected regions. Since the lock is no longer held in atomic
context, second-level page table allocations can also use GFP_KERNEL
instead of GFP_ATOMIC.
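
For illustration only (not part of this patch), the kind of call sequence
this change permits is sketched below. The structure and function names
are hypothetical, and clk_prepare_enable() stands in for any clock call
that may sleep:

#include <linux/clk.h>
#include <linux/mutex.h>

struct example_iommu {
	struct mutex lock;	/* previously a spinlock */
	struct clk *clk;
};

static int example_op(struct example_iommu *iommu)
{
	int ret;

	mutex_lock(&iommu->lock);
	/* A sleeping clock call is legal under a mutex, but not under
	 * a spinlock held with interrupts disabled. */
	ret = clk_prepare_enable(iommu->clk);
	if (!ret) {
		/* ... program the IOMMU / walk page tables ... */
		clk_disable_unprepare(iommu->clk);
	}
	mutex_unlock(&iommu->lock);
	return ret;
}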

Change-Id: I89a932d42e37d7860485add1b857288fd5947aea
Signed-off-by: Stepan Moskovchenko <stepanm@codeaurora.org>
diff --git a/arch/arm/mach-msm/iommu.c b/arch/arm/mach-msm/iommu.c
index 463d6c9..d1cc340 100644
--- a/arch/arm/mach-msm/iommu.c
+++ b/arch/arm/mach-msm/iommu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -56,7 +56,7 @@
 
 static int msm_iommu_tex_class[4];
 
-DEFINE_SPINLOCK(msm_iommu_lock);
+DEFINE_MUTEX(msm_iommu_lock);
 
 struct msm_priv {
 	unsigned long *pgtable;
@@ -299,11 +299,10 @@
 static void msm_iommu_domain_destroy(struct iommu_domain *domain)
 {
 	struct msm_priv *priv;
-	unsigned long flags;
 	unsigned long *fl_table;
 	int i;
 
-	spin_lock_irqsave(&msm_iommu_lock, flags);
+	mutex_lock(&msm_iommu_lock);
 	priv = domain->priv;
 	domain->priv = NULL;
 
@@ -320,7 +319,7 @@
 	}
 
 	kfree(priv);
-	spin_unlock_irqrestore(&msm_iommu_lock, flags);
+	mutex_unlock(&msm_iommu_lock);
 }
 
 static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -331,9 +330,8 @@
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	struct msm_iommu_ctx_drvdata *tmp_drvdata;
 	int ret = 0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&msm_iommu_lock, flags);
+	mutex_lock(&msm_iommu_lock);
 
 	priv = domain->priv;
 
@@ -373,7 +371,7 @@
 	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
 
 fail:
-	spin_unlock_irqrestore(&msm_iommu_lock, flags);
+	mutex_unlock(&msm_iommu_lock);
 	return ret;
 }
 
@@ -384,10 +382,9 @@
 	struct msm_iommu_ctx_dev *ctx_dev;
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&msm_iommu_lock, flags);
+	mutex_lock(&msm_iommu_lock);
 	priv = domain->priv;
 
 	if (!priv || !dev)
@@ -412,7 +409,7 @@
 	list_del_init(&ctx_drvdata->attached_elm);
 
 fail:
-	spin_unlock_irqrestore(&msm_iommu_lock, flags);
+	mutex_unlock(&msm_iommu_lock);
 }
 
 static int __get_pgprot(int prot, int len)
@@ -447,7 +444,6 @@
 			 phys_addr_t pa, int order, int prot)
 {
 	struct msm_priv *priv;
-	unsigned long flags;
 	unsigned long *fl_table;
 	unsigned long *fl_pte;
 	unsigned long fl_offset;
@@ -458,7 +454,7 @@
 	size_t len = 0x1000UL << order;
 	int ret = 0;
 
-	spin_lock_irqsave(&msm_iommu_lock, flags);
+	mutex_lock(&msm_iommu_lock);
 
 	priv = domain->priv;
 	if (!priv) {
@@ -525,7 +521,7 @@
 
 		if (*fl_pte == 0) {
 			unsigned long *sl;
-			sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
+			sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
 							get_order(SZ_4K));
 
 			if (!sl) {
@@ -583,7 +579,7 @@
 
 	ret = __flush_iotlb_va(domain, va);
 fail:
-	spin_unlock_irqrestore(&msm_iommu_lock, flags);
+	mutex_unlock(&msm_iommu_lock);
 	return ret;
 }
 
@@ -591,7 +587,6 @@
 			    int order)
 {
 	struct msm_priv *priv;
-	unsigned long flags;
 	unsigned long *fl_table;
 	unsigned long *fl_pte;
 	unsigned long fl_offset;
@@ -601,7 +596,7 @@
 	size_t len = 0x1000UL << order;
 	int i, ret = 0;
 
-	spin_lock_irqsave(&msm_iommu_lock, flags);
+	mutex_lock(&msm_iommu_lock);
 
 	priv = domain->priv;
 
@@ -686,7 +681,7 @@
 
 	ret = __flush_iotlb_va(domain, va);
 fail:
-	spin_unlock_irqrestore(&msm_iommu_lock, flags);
+	mutex_unlock(&msm_iommu_lock);
 	return ret;
 }
 
@@ -702,13 +697,12 @@
 	unsigned long fl_offset;
 	unsigned long *sl_table;
 	unsigned long sl_offset, sl_start;
-	unsigned long flags;
 	unsigned int chunk_offset = 0;
 	unsigned int chunk_pa;
 	int ret = 0;
 	struct msm_priv *priv;
 
-	spin_lock_irqsave(&msm_iommu_lock, flags);
+	mutex_lock(&msm_iommu_lock);
 
 	BUG_ON(len & (SZ_4K - 1));
 
@@ -734,7 +728,7 @@
 		/* Set up a 2nd level page table if one doesn't exist */
 		if (*fl_pte == 0) {
 			sl_table = (unsigned long *)
-				 __get_free_pages(GFP_ATOMIC, get_order(SZ_4K));
+				 __get_free_pages(GFP_KERNEL, get_order(SZ_4K));
 
 			if (!sl_table) {
 				pr_debug("Could not allocate second level table\n");
@@ -782,7 +776,7 @@
 	}
 	__flush_iotlb(domain);
 fail:
-	spin_unlock_irqrestore(&msm_iommu_lock, flags);
+	mutex_unlock(&msm_iommu_lock);
 	return ret;
 }
 
@@ -796,11 +790,10 @@
 	unsigned long fl_offset;
 	unsigned long *sl_table;
 	unsigned long sl_start, sl_end;
-	unsigned long flags;
 	int used, i;
 	struct msm_priv *priv;
 
-	spin_lock_irqsave(&msm_iommu_lock, flags);
+	mutex_lock(&msm_iommu_lock);
 
 	BUG_ON(len & (SZ_4K - 1));
 
@@ -854,7 +847,7 @@
 	}
 
 	__flush_iotlb(domain);
-	spin_unlock_irqrestore(&msm_iommu_lock, flags);
+	mutex_unlock(&msm_iommu_lock);
 	return 0;
 }
 
@@ -865,12 +858,11 @@
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	unsigned int par;
-	unsigned long flags;
 	void __iomem *base;
 	phys_addr_t ret = 0;
 	int ctx;
 
-	spin_lock_irqsave(&msm_iommu_lock, flags);
+	mutex_lock(&msm_iommu_lock);
 
 	priv = domain->priv;
 	if (list_empty(&priv->list_attached))
@@ -903,7 +895,7 @@
 
 	__disable_clocks(iommu_drvdata);
 fail:
-	spin_unlock_irqrestore(&msm_iommu_lock, flags);
+	mutex_unlock(&msm_iommu_lock);
 	return ret;
 }
 
@@ -947,7 +939,7 @@
 	unsigned int fsr;
 	int i, ret;
 
-	spin_lock(&msm_iommu_lock);
+	mutex_lock(&msm_iommu_lock);
 
 	if (!drvdata) {
 		pr_err("Invalid device ID in context interrupt handler\n");
@@ -975,7 +967,7 @@
 	}
 	__disable_clocks(drvdata);
 fail:
-	spin_unlock(&msm_iommu_lock);
+	mutex_unlock(&msm_iommu_lock);
 	return 0;
 }