gpu: ion: Add support for iommus

Add infrastructure for mapping ION allocations into IOMMU domains.
Each buffer keeps a reference-counted rbtree of its mappings, keyed
by (domain, partition). Heaps gain map_iommu/unmap_iommu operations,
implemented for the system, system contig and carveout heaps, and a
new ION_HEAP_TYPE_IOMMU heap backed by individually allocated pages
is added. Clients map and unmap buffers through the new
ion_map_iommu()/ion_unmap_iommu() API.
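
A minimal sketch of the intended client usage (the client/handle
setup, the domain and partition numbers, and the flags value are
illustrative only, not taken from this patch):

	unsigned long iova, size;
	int ret;

	/*
	 * Map the handle into iommu domain 1, partition 0, aligned to
	 * SZ_4K.  An iova_length of 0 means "use the buffer size";
	 * flags must match the flags the buffer was already mapped
	 * with, if any.
	 */
	ret = ion_map_iommu(client, handle, 1, 0, SZ_4K, 0,
			    &iova, &size, 0);
	if (ret)
		return ret;

	/* ... hand 'iova' to the hardware ... */

	ion_unmap_iommu(client, handle, 1, 0);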

Change-Id: Ia5eafebee408e297013bf55284abf67d9eb8d78b
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
index c0a47d8..3911950 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/gpu/ion/Makefile
@@ -1,3 +1,3 @@
-obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o
+obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o ion_iommu_heap.o
 obj-$(CONFIG_ION_TEGRA) += tegra/
 obj-$(CONFIG_ION_MSM) += msm/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 48dc9c0..e6a1b86 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -30,6 +30,7 @@
 #include <linux/uaccess.h>
 #include <linux/debugfs.h>
 
+#include <mach/iommu_domains.h>
 #include "ion_priv.h"
 #define DEBUG
 
@@ -102,8 +103,27 @@
 	unsigned int kmap_cnt;
 	unsigned int dmap_cnt;
 	unsigned int usermap_cnt;
+	unsigned int iommu_map_cnt;
 };
 
+static int ion_validate_buffer_flags(struct ion_buffer *buffer,
+					unsigned long flags)
+{
+	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt ||
+		buffer->iommu_map_cnt) {
+		if (buffer->flags != flags) {
+			pr_err("%s: buffer was already mapped with flags %lx,"
+				" cannot map with flags %lx\n", __func__,
+				buffer->flags, flags);
+			return 1;
+		}
+
+	} else {
+		buffer->flags = flags;
+	}
+	return 0;
+}
+
 /* this function should only be called while dev->lock is held */
 static void ion_buffer_add(struct ion_device *dev,
 			   struct ion_buffer *buffer)
@@ -130,6 +150,61 @@
 	rb_insert_color(&buffer->node, &dev->buffers);
 }
 
+void ion_iommu_add(struct ion_buffer *buffer,
+			  struct ion_iommu_map *iommu)
+{
+	struct rb_node **p = &buffer->iommu_maps.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_iommu_map *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_iommu_map, node);
+
+		if (iommu->key < entry->key) {
+			p = &(*p)->rb_left;
+		} else if (iommu->key > entry->key) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_err("%s: buffer %p already has mapping for domain %d"
+				" and partition %d\n", __func__,
+				buffer,
+				iommu_map_domain(iommu),
+				iommu_map_partition(iommu));
+			BUG();
+		}
+	}
+
+	rb_link_node(&iommu->node, parent, p);
+	rb_insert_color(&iommu->node, &buffer->iommu_maps);
+
+}
+
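+/*
+ * Per-buffer iommu mappings are kept in an rbtree keyed by
+ * (domain, partition) packed into a 64-bit key; the packing must
+ * match the domain_info/key union in ion_priv.h.
+ */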
+static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
+						unsigned int domain_no,
+						unsigned int partition_no)
+{
+	struct rb_node **p = &buffer->iommu_maps.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_iommu_map *entry;
+	uint64_t key = domain_no;
+	key = key << 32 | partition_no;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_iommu_map, node);
+
+		if (key < entry->key)
+			p = &(*p)->rb_left;
+		else if (key > entry->key)
+			p = &(*p)->rb_right;
+		else
+			return entry;
+	}
+
+	return NULL;
+}
+
 /* this function should only be called while dev->lock is held */
 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 				     struct ion_device *dev,
@@ -433,17 +508,9 @@
 		return ERR_PTR(-ENODEV);
 	}
 
-	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
-		if (buffer->flags != flags) {
-			pr_err("%s: buffer was already mapped with flags %lx,"
-				" cannot map with flags %lx\n", __func__,
-				buffer->flags, flags);
+	if (ion_validate_buffer_flags(buffer, flags)) {
 			vaddr = ERR_PTR(-EEXIST);
 			goto out;
-		}
-
-	} else {
-		buffer->flags = flags;
 	}
 
 	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
@@ -462,6 +529,179 @@
 	return vaddr;
 }
 
+int __ion_iommu_map(struct ion_buffer *buffer,
+		int domain_num, int partition_num, unsigned long align,
+		unsigned long iova_length, unsigned long flags,
+		unsigned long *iova)
+{
+	struct ion_iommu_map *data;
+	int ret;
+
+	data = kmalloc(sizeof(*data), GFP_ATOMIC);
+
+	if (!data)
+		return -ENOMEM;
+
+	data->buffer = buffer;
+	iommu_map_domain(data) = domain_num;
+	iommu_map_partition(data) = partition_num;
+
+	ret = buffer->heap->ops->map_iommu(buffer, data,
+						domain_num,
+						partition_num,
+						align,
+						iova_length,
+						flags);
+
+	if (ret)
+		goto out;
+
+	kref_init(&data->ref);
+	*iova = data->iova_addr;
+
+	ion_iommu_add(buffer, data);
+
+	return 0;
+
+out:
+	/* a failing ->map_iommu() releases any iova it allocated itself */
+	kfree(data);
+	return ret;
+}
+
+int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
+			int domain_num, int partition_num, unsigned long align,
+			unsigned long iova_length, unsigned long *iova,
+			unsigned long *buffer_size,
+			unsigned long flags)
+{
+	struct ion_buffer *buffer;
+	struct ion_iommu_map *iommu_map;
+	int ret = 0;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to ion_map_iommu.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+
+	if (!handle->buffer->heap->ops->map_iommu) {
+		pr_err("%s: map_iommu is not implemented by this heap.\n",
+		       __func__);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (ion_validate_buffer_flags(buffer, flags)) {
+		ret = -EEXIST;
+		goto out;
+	}
+
+	/*
+	 * If clients don't want a custom iova length, just use whatever
+	 * the buffer size is
+	 */
+	if (!iova_length)
+		iova_length = buffer->size;
+
+	if (buffer->size > iova_length) {
+		pr_debug("%s: iova length %lx is not at least buffer size"
+			" %x\n", __func__, iova_length, buffer->size);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (buffer->size & ~PAGE_MASK) {
+		pr_debug("%s: buffer size %x is not aligned to %lx\n",
+			__func__, buffer->size, PAGE_SIZE);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (iova_length & ~PAGE_MASK) {
+		pr_debug("%s: iova_length %lx is not aligned to %lx\n",
+			__func__, iova_length, PAGE_SIZE);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
+	if (_ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt) ||
+		!iommu_map) {
+		ret = __ion_iommu_map(buffer, domain_num, partition_num, align,
+					iova_length, flags, iova);
+		if (ret < 0)
+			_ion_unmap(&buffer->iommu_map_cnt,
+				   &handle->iommu_map_cnt);
+	} else {
+		if (iommu_map->mapped_size != iova_length) {
+			pr_err("%s: handle %p is already mapped with length"
+				" %x, trying to map with length %lx\n",
+				__func__, handle, iommu_map->mapped_size,
+				iova_length);
+			_ion_unmap(&buffer->iommu_map_cnt,
+				   &handle->iommu_map_cnt);
+			ret = -EINVAL;
+		} else {
+			kref_get(&iommu_map->ref);
+			*iova = iommu_map->iova_addr;
+		}
+	}
+	*buffer_size = buffer->size;
+out:
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ion_map_iommu);
+
+static void ion_iommu_release(struct kref *kref)
+{
+	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
+						ref);
+	struct ion_buffer *buffer = map->buffer;
+
+	rb_erase(&map->node, &buffer->iommu_maps);
+	buffer->heap->ops->unmap_iommu(map);
+	kfree(map);
+}
+
+void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
+			int domain_num, int partition_num)
+{
+	struct ion_iommu_map *iommu_map;
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	buffer = handle->buffer;
+
+	mutex_lock(&buffer->lock);
+
+	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
+
+	if (!iommu_map) {
+		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
+				domain_num, partition_num, buffer);
+		goto out;
+	}
+
+	_ion_unmap(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
+	kref_put(&iommu_map->ref, ion_iommu_release);
+
+out:
+	mutex_unlock(&buffer->lock);
+
+	mutex_unlock(&client->lock);
+
+}
+EXPORT_SYMBOL(ion_unmap_iommu);
+
 struct scatterlist *ion_map_dma(struct ion_client *client,
 				struct ion_handle *handle,
 				unsigned long flags)
@@ -487,17 +727,9 @@
 		return ERR_PTR(-ENODEV);
 	}
 
-	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
-		if (buffer->flags != flags) {
-			pr_err("%s: buffer was already mapped with flags %lx,"
-				" cannot map with flags %lx\n", __func__,
-				buffer->flags, flags);
-			sglist = ERR_PTR(-EEXIST);
-			goto out;
-		}
-
-	} else {
-		buffer->flags = flags;
+	if (ion_validate_buffer_flags(buffer, flags)) {
+		sglist = ERR_PTR(-EEXIST);
+		goto out;
 	}
 
 	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
@@ -888,6 +1120,28 @@
 }
 EXPORT_SYMBOL(ion_handle_get_flags);
 
+int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
+			unsigned long *size)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed.\n", __func__);
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	*size = buffer->size;
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ion_handle_get_size);
+
 static int ion_share_release(struct inode *inode, struct file* file)
 {
 	struct ion_buffer *buffer = file->private_data;
@@ -1001,19 +1255,13 @@
 	}
 
 	mutex_lock(&buffer->lock);
-	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
-		if (buffer->flags != flags) {
-			pr_err("%s: buffer was already mapped with flags %lx,"
-				" cannot map with flags %lx\n", __func__,
-				buffer->flags, flags);
-			ret = -EEXIST;
-			mutex_unlock(&buffer->lock);
-			goto err1;
-		}
 
-	} else {
-		buffer->flags = flags;
+	if (ion_validate_buffer_flags(buffer, flags)) {
+		ret = -EEXIST;
+		mutex_unlock(&buffer->lock);
+		goto err1;
 	}
+
 	/* now map it to userspace */
 	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma,
 						flags);
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 700bb79..b03fa23 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -2,6 +2,7 @@
  * drivers/gpu/ion/ion_carveout_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -23,8 +24,10 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/iommu.h>
 #include "ion_priv.h"
 
+#include <mach/iommu_domains.h>
 #include <asm/mach/map.h>
 
 struct ion_carveout_heap {
@@ -232,6 +235,108 @@
 	return carveout_heap->total_size;
 }
 
+int ion_carveout_heap_map_iommu(struct ion_buffer *buffer,
+					struct ion_iommu_map *data,
+					unsigned int domain_num,
+					unsigned int partition_num,
+					unsigned long align,
+					unsigned long iova_length,
+					unsigned long flags)
+{
+	unsigned long temp_phys, temp_iova;
+	struct iommu_domain *domain;
+	int i, ret = 0;
+	unsigned long extra;
+
+	data->mapped_size = iova_length;
+
+	if (!msm_use_iommu()) {
+		data->iova_addr = buffer->priv_phys;
+		return 0;
+	}
+
+	extra = iova_length - buffer->size;
+
+	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
+						data->mapped_size, align);
+
+	if (!data->iova_addr) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	temp_iova = data->iova_addr;
+	temp_phys = buffer->priv_phys;
+	for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
+						  temp_phys += SZ_4K) {
+		ret = iommu_map(domain, temp_iova, temp_phys,
+				get_order(SZ_4K),
+				ION_IS_CACHED(flags) ? 1 : 0);
+
+		if (ret) {
+			pr_err("%s: could not map %lx to %lx in domain %p\n",
+				__func__, temp_iova, temp_phys, domain);
+			goto out2;
+		}
+	}
+
+	if (extra) {
+		ret = msm_iommu_map_extra(domain, temp_iova, extra, flags);
+		if (ret)
+			goto out2;
+	}
+
+	return 0;
+
+out2:
+	/* unwind only the pages that were successfully mapped */
+	for ( ; i < buffer->size; i += SZ_4K) {
+		temp_iova -= SZ_4K;
+		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+	}
+
+out1:
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+
+out:
+
+	return ret;
+}
+
+void ion_carveout_heap_unmap_iommu(struct ion_iommu_map *data)
+{
+	int i;
+	unsigned long temp_iova;
+	unsigned int domain_num;
+	unsigned int partition_num;
+	struct iommu_domain *domain;
+
+	if (!msm_use_iommu())
+		return;
+
+	domain_num = iommu_map_domain(data);
+	partition_num = iommu_map_partition(data);
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+		return;
+	}
+
+	temp_iova = data->iova_addr;
+	for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
+		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+
+	return;
+}
+
 static struct ion_heap_ops carveout_heap_ops = {
 	.allocate = ion_carveout_heap_allocate,
 	.free = ion_carveout_heap_free,
@@ -245,6 +350,8 @@
 	.cache_op = ion_carveout_cache_ops,
 	.get_allocated = ion_carveout_get_allocated,
 	.get_total = ion_carveout_get_total,
+	.map_iommu = ion_carveout_heap_map_iommu,
+	.unmap_iommu = ion_carveout_heap_unmap_iommu,
 };
 
 struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 8ce3c19..900f445 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -32,6 +32,9 @@
 	case ION_HEAP_TYPE_CARVEOUT:
 		heap = ion_carveout_heap_create(heap_data);
 		break;
+	case ION_HEAP_TYPE_IOMMU:
+		heap = ion_iommu_heap_create(heap_data);
+		break;
 	default:
 		pr_err("%s: Invalid heap type %d\n", __func__,
 		       heap_data->type);
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
new file mode 100644
index 0000000..d37a811
--- /dev/null
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/iommu.h>
+#include <linux/pfn.h>
+#include "ion_priv.h"
+
+#include <asm/mach/map.h>
+#include <asm/page.h>
+#include <mach/iommu_domains.h>
+
+struct ion_iommu_heap {
+	struct ion_heap heap;
+};
+
+struct ion_iommu_priv_data {
+	struct page **pages;
+	int nrpages;
+	unsigned long size;
+};
+
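+/*
+ * Backing pages are allocated one at a time, so buffers from this heap
+ * are physically discontiguous; they are contiguous only in iommu,
+ * kernel (vmap) or user virtual address space.
+ */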
+static int ion_iommu_heap_allocate(struct ion_heap *heap,
+				      struct ion_buffer *buffer,
+				      unsigned long size, unsigned long align,
+				      unsigned long flags)
+{
+	int ret, i;
+	struct ion_iommu_priv_data *data = NULL;
+
+	if (msm_use_iommu()) {
+		data = kmalloc(sizeof(*data), GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+
+		data->size = PFN_ALIGN(size);
+		data->nrpages = data->size >> PAGE_SHIFT;
+		data->pages = kzalloc(sizeof(struct page *)*data->nrpages,
+				GFP_KERNEL);
+		if (!data->pages) {
+			ret = -ENOMEM;
+			goto err1;
+		}
+
+		for (i = 0; i < data->nrpages; i++) {
+			data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+			if (!data->pages[i]) {
+				ret = -ENOMEM;
+				goto err2;
+			}
+		}
+
+
+		buffer->priv_virt = data;
+		return 0;
+
+	} else {
+		return -ENOMEM;
+	}
+
+
+err2:
+	for (i = 0; i < data->nrpages; i++) {
+		if (data->pages[i])
+			__free_page(data->pages[i]);
+	}
+	kfree(data->pages);
+err1:
+	kfree(data);
+	return ret;
+}
+
+static void ion_iommu_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_iommu_priv_data *data = buffer->priv_virt;
+	int i;
+
+	if (!data)
+		return;
+
+	for (i = 0; i < data->nrpages; i++)
+		__free_page(data->pages[i]);
+
+	kfree(data->pages);
+	kfree(data);
+}
+
+void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
+				   struct ion_buffer *buffer,
+				   unsigned long flags)
+{
+	struct ion_iommu_priv_data *data = buffer->priv_virt;
+	pgprot_t page_prot = PAGE_KERNEL;
+
+	if (!data)
+		return NULL;
+
+	if (!ION_IS_CACHED(flags))
+		page_prot = pgprot_noncached(page_prot);
+
+	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);
+
+	return buffer->vaddr;
+}
+
+void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
+				    struct ion_buffer *buffer)
+{
+	if (!buffer->vaddr)
+		return;
+
+	vunmap(buffer->vaddr);
+	buffer->vaddr = NULL;
+}
+
+int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+			       struct vm_area_struct *vma, unsigned long flags)
+{
+	struct ion_iommu_priv_data *data = buffer->priv_virt;
+	int i;
+
+	if (!data)
+		return -EINVAL;
+
+	if (!ION_IS_CACHED(flags))
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	for (i = 0; i < data->nrpages; i++)
+		if (vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+			data->pages[i]))
+			/*
+			 * This will fail the mmap which will
+			 * clean up the vma space properly.
+			 */
+			return -EINVAL;
+
+	return 0;
+}
+
+int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
+					struct ion_iommu_map *data,
+					unsigned int domain_num,
+					unsigned int partition_num,
+					unsigned long align,
+					unsigned long iova_length,
+					unsigned long flags)
+{
+	unsigned long temp_iova;
+	struct iommu_domain *domain;
+	struct ion_iommu_priv_data *buffer_data = buffer->priv_virt;
+	int i, j, ret = 0;
+	unsigned long extra;
+
+	BUG_ON(!msm_use_iommu());
+
+	data->mapped_size = iova_length;
+	extra = iova_length - buffer->size;
+
+	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
+						data->mapped_size, align);
+
+	if (!data->iova_addr) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	temp_iova = data->iova_addr;
+	for (i = buffer->size, j = 0; i > 0; j++, i -= SZ_4K,
+						temp_iova += SZ_4K) {
+		ret = iommu_map(domain, temp_iova,
+				page_to_phys(buffer_data->pages[j]),
+				get_order(SZ_4K),
+				ION_IS_CACHED(flags) ? 1 : 0);
+
+		if (ret) {
+			pr_err("%s: could not map %lx to %x in domain %p\n",
+				__func__, temp_iova,
+				page_to_phys(buffer_data->pages[j]),
+				domain);
+			goto out2;
+		}
+	}
+
+	if (extra) {
+		ret = msm_iommu_map_extra(domain, temp_iova, extra, flags);
+		if (ret)
+			goto out2;
+	}
+
+	return 0;
+
+out2:
+	/* unwind only the pages that were successfully mapped */
+	for ( ; i < buffer->size; i += SZ_4K) {
+		temp_iova -= SZ_4K;
+		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+	}
+
+out1:
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+
+out:
+
+	return ret;
+}
+
+void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
+{
+	int i;
+	unsigned long temp_iova;
+	unsigned int domain_num;
+	unsigned int partition_num;
+	struct iommu_domain *domain;
+
+	BUG_ON(!msm_use_iommu());
+
+	domain_num = iommu_map_domain(data);
+	partition_num = iommu_map_partition(data);
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+		return;
+	}
+
+	temp_iova = data->iova_addr;
+	for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
+		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+
+	return;
+}
+
+
+static struct ion_heap_ops iommu_heap_ops = {
+	.allocate = ion_iommu_heap_allocate,
+	.free = ion_iommu_heap_free,
+	.map_user = ion_iommu_heap_map_user,
+	.map_kernel = ion_iommu_heap_map_kernel,
+	.unmap_kernel = ion_iommu_heap_unmap_kernel,
+	.map_iommu = ion_iommu_heap_map_iommu,
+	.unmap_iommu = ion_iommu_heap_unmap_iommu,
+};
+
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_iommu_heap *iommu_heap;
+
+	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
+	if (!iommu_heap)
+		return ERR_PTR(-ENOMEM);
+
+	iommu_heap->heap.ops = &iommu_heap_ops;
+	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
+
+	return &iommu_heap->heap;
+}
+
+void ion_iommu_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_iommu_heap *iommu_heap =
+	     container_of(heap, struct  ion_iommu_heap, heap);
+
+	kfree(iommu_heap);
+	iommu_heap = NULL;
+}
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index ac51854..77b73e2 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -22,6 +22,7 @@
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
 #include <linux/ion.h>
+#include <linux/iommu.h>
 
 struct ion_mapping;
 
@@ -35,6 +36,34 @@
 	void *vaddr;
 };
 
+/**
+ * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
+ * @iova_addr - iommu virtual address
+ * @node - rb node in the buffer's tree of iommu mappings
+ * @domain_info - contains the partition number and domain number
+ *		domain_info[1] = domain number
+ *		domain_info[0] = partition number
+ * @buffer - the ion buffer this mapping belongs to
+ * @ref - for reference counting this mapping
+ * @mapped_size - size of the iova space mapped
+ *		(may be larger than the buffer size)
+ *
+ * Represents a mapping of one ion buffer to a particular iommu domain
+ * and address range. There may exist other mappings of this buffer in
+ * different domains or address ranges. All mappings will have the same
+ * cacheability and security.
+ */
+struct ion_iommu_map {
+	unsigned long iova_addr;
+	struct rb_node node;
+	union {
+		int domain_info[2];
+		uint64_t key;
+	};
+	struct ion_buffer *buffer;
+	struct kref ref;
+	int mapped_size;
+};
+
 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
 
 /**
@@ -72,6 +101,8 @@
 	int dmap_cnt;
 	struct scatterlist *sglist;
 	int umap_cnt;
+	unsigned int iommu_map_cnt;
+	struct rb_root iommu_maps;
 	int marked;
 };
 
@@ -109,6 +140,15 @@
 			unsigned int length, unsigned int cmd);
 	unsigned long (*get_allocated)(struct ion_heap *heap);
 	unsigned long (*get_total)(struct ion_heap *heap);
+	int (*map_iommu)(struct ion_buffer *buffer,
+				struct ion_iommu_map *map_data,
+				unsigned int domain_num,
+				unsigned int partition_num,
+				unsigned long align,
+				unsigned long iova_length,
+				unsigned long flags);
+	void (*unmap_iommu)(struct ion_iommu_map *data);
+
 };
 
 /**
@@ -136,6 +176,11 @@
 	const char *name;
 };
 
+
+
+#define iommu_map_domain(__m)		((__m)->domain_info[1])
+#define iommu_map_partition(__m)	((__m)->domain_info[0])
+
 /**
  * ion_device_create - allocates and returns an ion device
  * @custom_ioctl:	arch specific ioctl function if applicable
@@ -177,6 +222,10 @@
 
 struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
 void ion_carveout_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
+void ion_iommu_heap_destroy(struct ion_heap *);
+
 /**
  * kernel api to allocate/free from carveout -- used when carveout is
  * used to back an architecture specific custom heap
@@ -185,6 +234,9 @@
 				      unsigned long align);
 void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
 		       unsigned long size);
+
+
+struct ion_heap *msm_get_contiguous_heap(void);
 /**
  * The carveout heap returns physical addresses, since 0 may be a valid
  * physical address, this is used to indicate allocation failed
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index b26d48c..5957658 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -2,6 +2,7 @@
  * drivers/gpu/ion/ion_system_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -20,6 +21,8 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/iommu.h>
+#include <mach/iommu_domains.h>
 #include "ion_priv.h"
 #include <mach/memory.h>
 
@@ -98,6 +101,37 @@
 {
 }
 
+void ion_system_heap_unmap_iommu(struct ion_iommu_map *data)
+{
+	int i;
+	unsigned long temp_iova;
+	unsigned int domain_num;
+	unsigned int partition_num;
+	struct iommu_domain *domain;
+
+	if (!msm_use_iommu())
+		return;
+
+	domain_num = iommu_map_domain(data);
+	partition_num = iommu_map_partition(data);
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+		return;
+	}
+
+	temp_iova = data->iova_addr;
+	for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
+		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+
+	return;
+}
+
 int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
 			     struct vm_area_struct *vma, unsigned long flags)
 {
@@ -160,6 +194,77 @@
 	return atomic_read(&system_heap_allocated);
 }
 
+int ion_system_heap_map_iommu(struct ion_buffer *buffer,
+				struct ion_iommu_map *data,
+				unsigned int domain_num,
+				unsigned int partition_num,
+				unsigned long align,
+				unsigned long iova_length,
+				unsigned long flags)
+{
+	int ret, i;
+	unsigned long temp_iova;
+	struct iommu_domain *domain;
+	void *temp_phys;
+	unsigned long extra;
+
+	if (!ION_IS_CACHED(flags))
+		return -EINVAL;
+
+	if (!msm_use_iommu())
+		return -EINVAL;
+
+	data->mapped_size = iova_length;
+	extra = iova_length - buffer->size;
+
+	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
+						data->mapped_size, align);
+
+	if (!data->iova_addr) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	temp_iova = data->iova_addr;
+	temp_phys = buffer->vaddr;
+	for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
+						  temp_phys += SZ_4K) {
+		ret = iommu_map(domain, temp_iova,
+			page_to_phys(vmalloc_to_page(temp_phys)),
+			get_order(SZ_4K), ION_IS_CACHED(flags) ? 1 : 0);
+
+		if (ret) {
+			pr_err("%s: could not map %lx to %x in domain %p\n",
+				__func__, temp_iova,
+				page_to_phys(vmalloc_to_page(temp_phys)),
+				domain);
+			goto out2;
+		}
+	}
+
+	if (extra) {
+		ret = msm_iommu_map_extra(domain, temp_iova, extra, flags);
+		if (ret)
+			goto out2;
+	}
+
+	return 0;
+
+out2:
+	/* unwind only the pages that were successfully mapped */
+	for ( ; i < buffer->size; i += SZ_4K) {
+		temp_iova -= SZ_4K;
+		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+	}
+
+out1:
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+						data->mapped_size);
+out:
+	return ret;
+}
+
 static struct ion_heap_ops vmalloc_ops = {
 	.allocate = ion_system_heap_allocate,
 	.free = ion_system_heap_free,
@@ -170,6 +275,8 @@
 	.map_user = ion_system_heap_map_user,
 	.cache_op = ion_system_heap_cache_ops,
 	.get_allocated = ion_system_heap_get_allocated,
+	.map_iommu = ion_system_heap_map_iommu,
+	.unmap_iommu = ion_system_heap_unmap_iommu,
 };
 
 struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
@@ -285,6 +392,74 @@
 	return atomic_read(&system_contig_heap_allocated);
 }
 
+int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
+				struct ion_iommu_map *data,
+				unsigned int domain_num,
+				unsigned int partition_num,
+				unsigned long align,
+				unsigned long iova_length,
+				unsigned long flags)
+{
+	int ret, i;
+	struct iommu_domain *domain;
+	unsigned long temp_phys, temp_iova;
+	unsigned long extra;
+
+	if (!ION_IS_CACHED(flags))
+		return -EINVAL;
+
+	if (!msm_use_iommu()) {
+		data->iova_addr = virt_to_phys(buffer->vaddr);
+		return 0;
+	}
+
+	data->mapped_size = iova_length;
+	extra = iova_length - buffer->size;
+
+	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
+						data->mapped_size, align);
+
+	if (!data->iova_addr) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+	temp_iova = data->iova_addr;
+	temp_phys = virt_to_phys(buffer->vaddr);
+	for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
+						  temp_phys += SZ_4K) {
+		ret = iommu_map(domain, temp_iova,
+			temp_phys,
+			get_order(SZ_4K), ION_IS_CACHED(flags) ? 1 : 0);
+
+		if (ret) {
+			pr_err("%s: could not map %lx to %lx in domain %p\n",
+				__func__, temp_iova, temp_phys, domain);
+			goto out2;
+		}
+	}
+
+	if (extra) {
+		ret = msm_iommu_map_extra(domain, temp_iova, extra, flags);
+		if (ret)
+			goto out2;
+	}
+
+	return 0;
+
+out2:
+	/* unwind only the pages that were successfully mapped */
+	for ( ; i < buffer->size; i += SZ_4K) {
+		temp_iova -= SZ_4K;
+		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+	}
+
+out1:
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+						data->mapped_size);
+out:
+	return ret;
+}
+
 static struct ion_heap_ops kmalloc_ops = {
 	.allocate = ion_system_contig_heap_allocate,
 	.free = ion_system_contig_heap_free,
@@ -296,6 +471,8 @@
 	.map_user = ion_system_contig_heap_map_user,
 	.cache_op = ion_system_contig_heap_cache_ops,
 	.get_allocated = ion_system_contig_heap_get_allocated,
+	.map_iommu = ion_system_contig_heap_map_iommu,
+	.unmap_iommu = ion_system_heap_unmap_iommu,
 };
 
 struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index 54dd056..0c96eaf 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -89,6 +89,7 @@
 			heaps[i] = 0;
 			continue;
 		}
+
 		ion_device_add_heap(idev, heaps[i]);
 	}
 	platform_set_drvdata(pdev, idev);