Initial commit from HTC m7ul-3.4.10-jb-crc-ddcfb8c
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
new file mode 100644
index 0000000..5bb254b
--- /dev/null
+++ b/drivers/gpu/ion/Kconfig
@@ -0,0 +1,18 @@
+menuconfig ION
+	tristate "Ion Memory Manager"
+	select GENERIC_ALLOCATOR
+	select DMA_SHARED_BUFFER
+	help
+	  Choose this option to enable the ION Memory Manager.
+
+config ION_TEGRA
+	tristate "Ion for Tegra"
+	depends on ARCH_TEGRA && ION
+	help
+	  Choose this option if you wish to use ion on an NVIDIA Tegra.
+
+config ION_MSM
+	tristate "Ion for MSM"
+	depends on ARCH_MSM && ION
+	help
+	  Choose this option if you wish to use ion on an MSM target.
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
new file mode 100644
index 0000000..c9e8a94
--- /dev/null
+++ b/drivers/gpu/ion/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o ion_iommu_heap.o ion_cp_heap.o
+obj-$(CONFIG_ION_TEGRA) += tegra/
+obj-$(CONFIG_ION_MSM) += msm/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
new file mode 100644
index 0000000..c9df909
--- /dev/null
+++ b/drivers/gpu/ion/ion.c
@@ -0,0 +1,1882 @@
+/*
+ * drivers/gpu/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define DEBUG
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/ion.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+
+#include <mach/iommu_domains.h>
+#include "ion_priv.h"
+
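+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev:		the actual misc device
+ * @buffers:		an rb tree of all the existing buffers
+ * @lock:		lock protecting the buffers and heaps trees
+ * @heaps:		rb tree of all the heaps in the system
+ * @custom_ioctl:	arch specific ioctl function if applicable
+ * @clients:		rb tree of all the clients of this device
+ * @debug_root:		root debugfs directory for ion
+ */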
+struct ion_device {
+	struct miscdevice dev;
+	struct rb_root buffers;
+	struct mutex lock;
+	struct rb_root heaps;
+	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
+			      unsigned long arg);
+	struct rb_root clients;
+	struct dentry *debug_root;
+};
+
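+/**
+ * struct ion_client - a process/hw block local address space
+ * @node:		node in the device's tree of all clients
+ * @dev:		back pointer to the ion device
+ * @handles:		an rb tree of all the handles in this client
+ * @lock:		lock protecting the tree of handles
+ * @heap_mask:		mask of all supported heap types
+ * @name:		used for debugging
+ * @task:		used for debugging
+ * @pid:		pid of the client
+ * @debug_root:		debugfs root of this client
+ */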
+struct ion_client {
+	struct rb_node node;
+	struct ion_device *dev;
+	struct rb_root handles;
+	struct mutex lock;
+	unsigned int heap_mask;
+	char *name;
+	struct task_struct *task;
+	pid_t pid;
+	struct dentry *debug_root;
+};
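+/**
+ * struct ion_handle - a client local reference to a buffer
+ * @ref:		reference count
+ * @client:		back pointer to the client the buffer resides in
+ * @buffer:		pointer to the buffer
+ * @node:		node in the client's handle rbtree
+ * @kmap_cnt:		count of times this client has mapped to kernel
+ * @iommu_map_cnt:	how many times this handle has mapped the iommu
+ */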
+
+struct ion_handle {
+	struct kref ref;
+	struct ion_client *client;
+	struct ion_buffer *buffer;
+	struct rb_node node;
+	unsigned int kmap_cnt;
+	unsigned int iommu_map_cnt;
+};
+
+static void ion_iommu_release(struct kref *kref);
+
+static int ion_validate_buffer_flags(struct ion_buffer *buffer,
+					unsigned long flags)
+{
+	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt ||
+		buffer->iommu_map_cnt) {
+		if (buffer->flags != flags) {
+			pr_err("%s: buffer was already mapped with flags %lx,"
+				" cannot map with flags %lx\n", __func__,
+				buffer->flags, flags);
+			return 1;
+		}
+
+	} else {
+		buffer->flags = flags;
+	}
+	return 0;
+}
+
+static void ion_buffer_add(struct ion_device *dev,
+			   struct ion_buffer *buffer)
+{
+	struct rb_node **p = &dev->buffers.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_buffer *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_buffer, node);
+
+		if (buffer < entry) {
+			p = &(*p)->rb_left;
+		} else if (buffer > entry) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_err("%s: buffer already found.", __func__);
+			BUG();
+		}
+	}
+
+	rb_link_node(&buffer->node, parent, p);
+	rb_insert_color(&buffer->node, &dev->buffers);
+}
+
+static void ion_iommu_add(struct ion_buffer *buffer,
+			  struct ion_iommu_map *iommu)
+{
+	struct rb_node **p = &buffer->iommu_maps.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_iommu_map *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_iommu_map, node);
+
+		if (iommu->key < entry->key) {
+			p = &(*p)->rb_left;
+		} else if (iommu->key > entry->key) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_err("%s: buffer %p already has mapping for domain %d"
+				" and partition %d\n", __func__,
+				buffer,
+				iommu_map_domain(iommu),
+				iommu_map_partition(iommu));
+			BUG();
+		}
+	}
+
+	rb_link_node(&iommu->node, parent, p);
+	rb_insert_color(&iommu->node, &buffer->iommu_maps);
+
+}
+
+static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
+						unsigned int domain_no,
+						unsigned int partition_no)
+{
+	struct rb_node **p = &buffer->iommu_maps.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_iommu_map *entry;
+	uint64_t key = domain_no;
+	key = key << 32 | partition_no;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_iommu_map, node);
+
+		if (key < entry->key)
+			p = &(*p)->rb_left;
+		else if (key > entry->key)
+			p = &(*p)->rb_right;
+		else
+			return entry;
+	}
+
+	return NULL;
+}
+
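+/* this function should only be called while dev->lock is held */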
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+				     struct ion_device *dev,
+				     unsigned long len,
+				     unsigned long align,
+				     unsigned long flags)
+{
+	struct ion_buffer *buffer;
+	struct sg_table *table;
+	int ret;
+
+	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+	if (!buffer)
+		return ERR_PTR(-ENOMEM);
+
+	buffer->heap = heap;
+	kref_init(&buffer->ref);
+
+	ret = heap->ops->allocate(heap, buffer, len, align, flags);
+	if (ret) {
+		kfree(buffer);
+		return ERR_PTR(ret);
+	}
+
+	buffer->dev = dev;
+	buffer->size = len;
+
+	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
+	if (IS_ERR_OR_NULL(table)) {
+		heap->ops->free(buffer);
+		kfree(buffer);
+		return ERR_PTR(PTR_ERR(table));
+	}
+	buffer->sg_table = table;
+
+	mutex_init(&buffer->lock);
+	ion_buffer_add(dev, buffer);
+	return buffer;
+}
+
+static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
+{
+	struct ion_iommu_map *iommu_map;
+	struct rb_node *node;
+	const struct rb_root *rb = &(buffer->iommu_maps);
+	unsigned long ref_count;
+	unsigned int delayed_unmap;
+
+	mutex_lock(&buffer->lock);
+
+	while ((node = rb_first(rb)) != 0) {
+		iommu_map = rb_entry(node, struct ion_iommu_map, node);
+		ref_count = atomic_read(&iommu_map->ref.refcount);
+		delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;
+
+		if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
+			pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
+				__func__, iommu_map->domain_info[DI_DOMAIN_NUM],
+				iommu_map->domain_info[DI_PARTITION_NUM]);
+		}
+		
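+		/* reset the ref count to 1 so the kref_put below releases it */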
+		kref_init(&iommu_map->ref);
+		kref_put(&iommu_map->ref, ion_iommu_release);
+	}
+
+	mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_destroy(struct kref *kref)
+{
+	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+	struct ion_device *dev = buffer->dev;
+
+	if (WARN_ON(buffer->kmap_cnt > 0))
+		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+
+	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+
+	ion_iommu_delayed_unmap(buffer);
+	buffer->heap->ops->free(buffer);
+	mutex_lock(&dev->lock);
+	rb_erase(&buffer->node, &dev->buffers);
+	mutex_unlock(&dev->lock);
+	kfree(buffer);
+}
+
+static void ion_buffer_get(struct ion_buffer *buffer)
+{
+	kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+	return kref_put(&buffer->ref, ion_buffer_destroy);
+}
+
+static struct ion_handle *ion_handle_create(struct ion_client *client,
+				     struct ion_buffer *buffer)
+{
+	struct ion_handle *handle;
+
+	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+	if (!handle)
+		return ERR_PTR(-ENOMEM);
+	kref_init(&handle->ref);
+	rb_init_node(&handle->node);
+	handle->client = client;
+	ion_buffer_get(buffer);
+	handle->buffer = buffer;
+
+	return handle;
+}
+
+static void ion_handle_kmap_put(struct ion_handle *);
+
+static void ion_handle_destroy(struct kref *kref)
+{
+	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+	struct ion_client *client = handle->client;
+	struct ion_buffer *buffer = handle->buffer;
+
+	mutex_lock(&buffer->lock);
+	while (handle->kmap_cnt)
+		ion_handle_kmap_put(handle);
+	mutex_unlock(&buffer->lock);
+
+	if (!RB_EMPTY_NODE(&handle->node))
+		rb_erase(&handle->node, &client->handles);
+
+	ion_buffer_put(buffer);
+	kfree(handle);
+}
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+	return handle->buffer;
+}
+
+static void ion_handle_get(struct ion_handle *handle)
+{
+	kref_get(&handle->ref);
+}
+
+static int ion_handle_put(struct ion_handle *handle)
+{
+	return kref_put(&handle->ref, ion_handle_destroy);
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+					    struct ion_buffer *buffer)
+{
+	struct rb_node *n;
+
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		if (handle->buffer == buffer)
+			return handle;
+	}
+	return NULL;
+}
+
+static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
+{
+	struct rb_node *n = client->handles.rb_node;
+
+	while (n) {
+		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
+							  node);
+		if (handle < handle_node)
+			n = n->rb_left;
+		else if (handle > handle_node)
+			n = n->rb_right;
+		else
+			return true;
+	}
+	return false;
+}
+
+static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+	struct rb_node **p = &client->handles.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_handle *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_handle, node);
+
+		if (handle < entry)
+			p = &(*p)->rb_left;
+		else if (handle > entry)
+			p = &(*p)->rb_right;
+		else
+			WARN(1, "%s: handle already found.\n", __func__);
+	}
+
+	rb_link_node(&handle->node, parent, p);
+	rb_insert_color(&handle->node, &client->handles);
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int flags)
+{
+	struct rb_node *n;
+	struct ion_handle *handle;
+	struct ion_device *dev = client->dev;
+	struct ion_buffer *buffer = NULL;
+	unsigned long secure_allocation = flags & ION_SECURE;
+	const unsigned int MAX_DBG_STR_LEN = 64;
+	char dbg_str[MAX_DBG_STR_LEN];
+	unsigned int dbg_str_idx = 0;
+
+	dbg_str[0] = '\0';
+
+	if (WARN_ON(!len))
+		return ERR_PTR(-EINVAL);
+
+	len = PAGE_ALIGN(len);
+
+	mutex_lock(&dev->lock);
+	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
+		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
+		
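+		/* if the client doesn't support this heap type */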
+		if (!((1 << heap->type) & client->heap_mask))
+			continue;
+		
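+		/* if the caller didn't specify this heap id */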
+		if (!((1 << heap->id) & flags))
+			continue;
+		
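+		/* a secure allocation may only come from a secure (CP) heap */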
+		if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP))
+			continue;
+		buffer = ion_buffer_create(heap, dev, len, align, flags);
+		if (!IS_ERR_OR_NULL(buffer))
+			break;
+		if (dbg_str_idx < MAX_DBG_STR_LEN) {
+			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
+			int ret_value = snprintf(&dbg_str[dbg_str_idx],
+						len_left, "%s ", heap->name);
+			if (ret_value >= len_left) {
+				
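+				/* snprintf output was truncated */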
+				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
+				dbg_str_idx = MAX_DBG_STR_LEN;
+			} else if (ret_value >= 0) {
+				dbg_str_idx += ret_value;
+			} else {
+				
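+				/* snprintf reported an error */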
+				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
+			}
+		}
+	}
+	mutex_unlock(&dev->lock);
+
+	if (buffer == NULL)
+		return ERR_PTR(-ENODEV);
+
+	if (IS_ERR(buffer)) {
+		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
+			 "0x%x) from heap(s) %sfor client %s with heap "
+			 "mask 0x%x\n",
+			len, align, dbg_str, client->name, client->heap_mask);
+		return ERR_PTR(PTR_ERR(buffer));
+	}
+
+	buffer->creator = client;
+	handle = ion_handle_create(client, buffer);
+
+	ion_buffer_put(buffer);
+
+	if (!IS_ERR(handle)) {
+		mutex_lock(&client->lock);
+		ion_handle_add(client, handle);
+		mutex_unlock(&client->lock);
+	}
+
+	return handle;
+}
+EXPORT_SYMBOL(ion_alloc);
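+
+/*
+ * Minimal in-kernel usage sketch (assumes a client created with
+ * ion_client_create() and that ION_IOMMU_HEAP_ID is a valid heap id on
+ * this target; here the flags argument doubles as the heap id mask):
+ *
+ *	handle = ion_alloc(client, SZ_4K, SZ_4K,
+ *			   ION_HEAP(ION_IOMMU_HEAP_ID));
+ *	if (!IS_ERR_OR_NULL(handle)) {
+ *		...
+ *		ion_free(client, handle);
+ *	}
+ */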
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+	bool valid_handle;
+
+	BUG_ON(client != handle->client);
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, handle);
+	if (!valid_handle) {
+		mutex_unlock(&client->lock);
+		WARN(1, "%s: invalid handle passed to free.\n", __func__);
+		return;
+	}
+	ion_handle_put(handle);
+	mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_free);
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+	     ion_phys_addr_t *addr, size_t *len)
+{
+	struct ion_buffer *buffer;
+	int ret;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+
+	buffer = handle->buffer;
+
+	if (!buffer->heap->ops->phys) {
+		pr_err("%s: ion_phys is not implemented by this heap.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return -ENODEV;
+	}
+	mutex_unlock(&client->lock);
+	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+	return ret;
+}
+EXPORT_SYMBOL(ion_phys);
+
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+{
+	void *vaddr;
+
+	if (buffer->kmap_cnt) {
+		buffer->kmap_cnt++;
+		return buffer->vaddr;
+	}
+	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+	if (IS_ERR_OR_NULL(vaddr))
+		return vaddr;
+	buffer->vaddr = vaddr;
+	buffer->kmap_cnt++;
+	return vaddr;
+}
+
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+	struct ion_buffer *buffer = handle->buffer;
+	void *vaddr;
+
+	if (handle->kmap_cnt) {
+		handle->kmap_cnt++;
+		return buffer->vaddr;
+	}
+	vaddr = ion_buffer_kmap_get(buffer);
+	if (IS_ERR_OR_NULL(vaddr))
+		return vaddr;
+	handle->kmap_cnt++;
+	return vaddr;
+}
+
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+	buffer->kmap_cnt--;
+	if (!buffer->kmap_cnt) {
+		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+		buffer->vaddr = NULL;
+	}
+}
+
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+	struct ion_buffer *buffer = handle->buffer;
+
+	handle->kmap_cnt--;
+	if (!handle->kmap_cnt)
+		ion_buffer_kmap_put(buffer);
+}
+
+static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
+		int domain_num, int partition_num, unsigned long align,
+		unsigned long iova_length, unsigned long flags,
+		unsigned long *iova)
+{
+	struct ion_iommu_map *data;
+	int ret;
+
+	data = kmalloc(sizeof(*data), GFP_ATOMIC);
+
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	data->buffer = buffer;
+	iommu_map_domain(data) = domain_num;
+	iommu_map_partition(data) = partition_num;
+
+	ret = buffer->heap->ops->map_iommu(buffer, data,
+						domain_num,
+						partition_num,
+						align,
+						iova_length,
+						flags);
+
+	if (ret)
+		goto out;
+
+	kref_init(&data->ref);
+	*iova = data->iova_addr;
+
+	ion_iommu_add(buffer, data);
+
+	return data;
+
+out:
+	kfree(data);
+	return ERR_PTR(ret);
+}
+
+int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
+			int domain_num, int partition_num, unsigned long align,
+			unsigned long iova_length, unsigned long *iova,
+			unsigned long *buffer_size,
+			unsigned long flags, unsigned long iommu_flags)
+{
+	struct ion_buffer *buffer;
+	struct ion_iommu_map *iommu_map;
+	int ret = 0;
+
+	if (ION_IS_CACHED(flags)) {
+		pr_err("%s: Cannot map iommu as cached.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_kernel.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+
+	if (!handle->buffer->heap->ops->map_iommu) {
+		pr_err("%s: map_iommu is not implemented by this heap.\n",
+		       __func__);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (!iova_length)
+		iova_length = buffer->size;
+
+	if (buffer->size > iova_length) {
+		pr_debug("%s: iova length %lx is not at least buffer size"
+			" %x\n", __func__, iova_length, buffer->size);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (buffer->size & ~PAGE_MASK) {
+		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
+			buffer->size, PAGE_SIZE);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (iova_length & ~PAGE_MASK) {
+		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
+			iova_length, PAGE_SIZE);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
+	if (!iommu_map) {
+		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
+					    align, iova_length, flags, iova);
+		if (!IS_ERR_OR_NULL(iommu_map)) {
+			iommu_map->flags = iommu_flags;
+
+			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
+				kref_get(&iommu_map->ref);
+		}
+	} else {
+		if (iommu_map->flags != iommu_flags) {
+			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
+				__func__, handle,
+				iommu_map->flags, iommu_flags);
+			ret = -EINVAL;
+		} else if (iommu_map->mapped_size != iova_length) {
+			pr_err("%s: handle %p is already mapped with length"
+					" %x, trying to map with length %lx\n",
+				__func__, handle, iommu_map->mapped_size,
+				iova_length);
+			ret = -EINVAL;
+		} else {
+			kref_get(&iommu_map->ref);
+			*iova = iommu_map->iova_addr;
+		}
+	}
+	if (!ret)
+		buffer->iommu_map_cnt++;
+	*buffer_size = buffer->size;
+out:
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ion_map_iommu);
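+
+/*
+ * Sketch of a typical map/unmap pairing (assumes the msm iommu domain and
+ * partition numbers were set up elsewhere; error handling omitted; an
+ * iova_length of 0 defaults to the buffer size):
+ *
+ *	unsigned long iova, size;
+ *	ret = ion_map_iommu(client, handle, domain_num, partition_num,
+ *			    SZ_4K, 0, &iova, &size, 0, 0);
+ *	...
+ *	ion_unmap_iommu(client, handle, domain_num, partition_num);
+ */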
+
+static void ion_iommu_release(struct kref *kref)
+{
+	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
+						ref);
+	struct ion_buffer *buffer = map->buffer;
+
+	rb_erase(&map->node, &buffer->iommu_maps);
+	buffer->heap->ops->unmap_iommu(map);
+	kfree(map);
+}
+
+void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
+			int domain_num, int partition_num)
+{
+	struct ion_iommu_map *iommu_map;
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	buffer = handle->buffer;
+
+	mutex_lock(&buffer->lock);
+
+	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
+
+	if (!iommu_map) {
+		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
+				domain_num, partition_num, buffer);
+		goto out;
+	}
+
+	kref_put(&iommu_map->ref, ion_iommu_release);
+
+	buffer->iommu_map_cnt--;
+out:
+	mutex_unlock(&buffer->lock);
+
+	mutex_unlock(&client->lock);
+
+}
+EXPORT_SYMBOL(ion_unmap_iommu);
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
+			unsigned long flags)
+{
+	struct ion_buffer *buffer;
+	void *vaddr;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_kernel.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+
+	buffer = handle->buffer;
+
+	if (!handle->buffer->heap->ops->map_kernel) {
+		pr_err("%s: map_kernel is not implemented by this heap.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-ENODEV);
+	}
+
+	if (ion_validate_buffer_flags(buffer, flags)) {
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EEXIST);
+	}
+
+	mutex_lock(&buffer->lock);
+	vaddr = ion_handle_kmap_get(handle);
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+	return vaddr;
+}
+EXPORT_SYMBOL(ion_map_kernel);
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	ion_handle_kmap_put(handle);
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_unmap_kernel);
+
+static int check_vaddr_bounds(unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm = current->active_mm;
+	struct vm_area_struct *vma;
+	int ret = 1;
+
+	if (end < start)
+		goto out;
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, start);
+	if (vma && vma->vm_start < end) {
+		if (start < vma->vm_start)
+			goto out_up;
+		if (end > vma->vm_end)
+			goto out_up;
+		ret = 0;
+	}
+
+out_up:
+	up_read(&mm->mmap_sem);
+out:
+	return ret;
+}
+
+int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			void *uaddr, unsigned long offset, unsigned long len,
+			unsigned int cmd)
+{
+	struct ion_buffer *buffer;
+	int ret = -EINVAL;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to do_cache_op.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+
+	if (!ION_IS_CACHED(buffer->flags)) {
+		ret = 0;
+		goto out;
+	}
+
+	if (!handle->buffer->heap->ops->cache_op) {
+		pr_err("%s: cache_op is not implemented by this heap.\n",
+		       __func__);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
+						offset, len, cmd);
+
+out:
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+	return ret;
+
+}
+EXPORT_SYMBOL(ion_do_cache_op);
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+	struct ion_client *client = s->private;
+	struct rb_node *n;
+	struct rb_node *n2;
+
+	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
+			"heap_name", "size_in_bytes", "handle refcount",
+			"buffer", "physical", "[domain,partition] - virt");
+
+	mutex_lock(&client->lock);
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		enum ion_heap_type type = handle->buffer->heap->type;
+
+		seq_printf(s, "%16.16s: %16x : %16d : %12p",
+				handle->buffer->heap->name,
+				handle->buffer->size,
+				atomic_read(&handle->ref.refcount),
+				handle->buffer);
+
+		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
+			type == ION_HEAP_TYPE_CARVEOUT ||
+			type == ION_HEAP_TYPE_CP)
+			seq_printf(s, " : %12lx", handle->buffer->priv_phys);
+		else
+			seq_printf(s, " : %12s", "N/A");
+
+		for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
+				   n2 = rb_next(n2)) {
+			struct ion_iommu_map *imap =
+				rb_entry(n2, struct ion_iommu_map, node);
+			seq_printf(s, " : [%d,%d] - %8lx",
+					imap->domain_info[DI_DOMAIN_NUM],
+					imap->domain_info[DI_PARTITION_NUM],
+					imap->iova_addr);
+		}
+		seq_printf(s, "\n");
+	}
+	mutex_unlock(&client->lock);
+
+	return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+	.open = ion_debug_client_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+struct ion_client *ion_client_create(struct ion_device *dev,
+				     unsigned int heap_mask,
+				     const char *name)
+{
+	struct ion_client *client;
+	struct task_struct *task;
+	struct rb_node **p;
+	struct rb_node *parent = NULL;
+	struct ion_client *entry;
+	pid_t pid;
+	unsigned int name_len;
+
+	if (!name) {
+		pr_err("%s: Name cannot be null\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	name_len = strnlen(name, 64);
+
+	get_task_struct(current->group_leader);
+	task_lock(current->group_leader);
+	pid = task_pid_nr(current->group_leader);
+	if (current->group_leader->flags & PF_KTHREAD) {
+		put_task_struct(current->group_leader);
+		task = NULL;
+	} else {
+		task = current->group_leader;
+	}
+	task_unlock(current->group_leader);
+
+	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+	if (!client) {
+		if (task)
+			put_task_struct(current->group_leader);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	client->dev = dev;
+	client->handles = RB_ROOT;
+	mutex_init(&client->lock);
+
+	client->name = kzalloc(name_len+1, GFP_KERNEL);
+	if (!client->name) {
+		put_task_struct(current->group_leader);
+		kfree(client);
+		return ERR_PTR(-ENOMEM);
+	} else {
+		strlcpy(client->name, name, name_len+1);
+	}
+
+	client->heap_mask = heap_mask;
+	client->task = task;
+	client->pid = pid;
+
+	mutex_lock(&dev->lock);
+	p = &dev->clients.rb_node;
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_client, node);
+
+		if (client < entry)
+			p = &(*p)->rb_left;
+		else if (client > entry)
+			p = &(*p)->rb_right;
+	}
+	rb_link_node(&client->node, parent, p);
+	rb_insert_color(&client->node, &dev->clients);
+
+	client->debug_root = debugfs_create_file(name, 0664,
+						 dev->debug_root, client,
+						 &debug_client_fops);
+	mutex_unlock(&dev->lock);
+
+	pr_info("%s: create ion_client (%s) at %p\n", __func__, client->name, client);
+	return client;
+}
+
+void ion_client_destroy(struct ion_client *client)
+{
+	struct ion_device *dev = client->dev;
+	struct rb_node *n;
+
+	pr_info("%s: destroy ion_client %p (%s)\n", __func__, client, client->name);
+	while ((n = rb_first(&client->handles))) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		mutex_lock(&client->lock);
+		ion_handle_destroy(&handle->ref);
+		mutex_unlock(&client->lock);
+	}
+	mutex_lock(&dev->lock);
+	if (client->task)
+		put_task_struct(client->task);
+	rb_erase(&client->node, &dev->clients);
+	debugfs_remove_recursive(client->debug_root);
+	mutex_unlock(&dev->lock);
+
+	kfree(client->name);
+	kfree(client);
+}
+EXPORT_SYMBOL(ion_client_destroy);
+
+int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
+			unsigned long *flags)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to %s.\n",
+		       __func__, __func__);
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	*flags = buffer->flags;
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ion_handle_get_flags);
+
+int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
+			unsigned long *size)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to %s.\n",
+		       __func__, __func__);
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	*size = buffer->size;
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ion_handle_get_size);
+
+struct sg_table *ion_sg_table(struct ion_client *client,
+			      struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	struct sg_table *table;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_dma.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = handle->buffer;
+	table = buffer->sg_table;
+	mutex_unlock(&client->lock);
+	return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+					enum dma_data_direction direction)
+{
+	struct dma_buf *dmabuf = attachment->dmabuf;
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+			      struct sg_table *table,
+			      enum dma_data_direction direction)
+{
+}
+
+static void ion_vma_open(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+
+	mutex_lock(&buffer->lock);
+	buffer->umap_cnt++;
+	mutex_unlock(&buffer->lock);
+}
+
+static void ion_vma_close(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+
+	mutex_lock(&buffer->lock);
+	buffer->umap_cnt--;
+	mutex_unlock(&buffer->lock);
+
+	if (buffer->heap->ops->unmap_user)
+		buffer->heap->ops->unmap_user(buffer->heap, buffer);
+}
+
+static struct vm_operations_struct ion_vm_ops = {
+	.open = ion_vma_open,
+	.close = ion_vma_close,
+};
+
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	int ret;
+
+	if (!buffer->heap->ops->map_user) {
+		pr_err("%s: this heap does not define a method for mapping "
+		       "to userspace\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&buffer->lock);
+	
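+	/* now map it to userspace */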
+	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+
+	if (ret) {
+		mutex_unlock(&buffer->lock);
+		pr_err("%s: failure mapping buffer to userspace\n",
+		       __func__);
+	} else {
+		buffer->umap_cnt++;
+		mutex_unlock(&buffer->lock);
+
+		vma->vm_ops = &ion_vm_ops;
+		vma->vm_private_data = buffer;
+	}
+	return ret;
+}
+
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	ion_buffer_put(buffer);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	return buffer->vaddr + offset;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+			       void *ptr)
+{
+	return;
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+					size_t len,
+					enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	void *vaddr;
+
+	if (!buffer->heap->ops->map_kernel) {
+		pr_err("%s: map kernel is not implemented by this heap.\n",
+		       __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&buffer->lock);
+	vaddr = ion_buffer_kmap_get(buffer);
+	mutex_unlock(&buffer->lock);
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
+	if (!vaddr)
+		return -ENOMEM;
+	return 0;
+}
+
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+				       size_t len,
+				       enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	mutex_lock(&buffer->lock);
+	ion_buffer_kmap_put(buffer);
+	mutex_unlock(&buffer->lock);
+}
+
+static struct dma_buf_ops dma_buf_ops = {
+	.map_dma_buf = ion_map_dma_buf,
+	.unmap_dma_buf = ion_unmap_dma_buf,
+	.mmap = ion_mmap,
+	.release = ion_dma_buf_release,
+	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
+	.end_cpu_access = ion_dma_buf_end_cpu_access,
+	.kmap_atomic = ion_dma_buf_kmap,
+	.kunmap_atomic = ion_dma_buf_kunmap,
+	.kmap = ion_dma_buf_kmap,
+	.kunmap = ion_dma_buf_kunmap,
+};
+
+static int ion_share_set_flags(struct ion_client *client,
+				struct ion_handle *handle,
+				unsigned long flags)
+{
+	struct ion_buffer *buffer;
+	bool valid_handle;
+	unsigned long ion_flags = ION_SET_CACHE(CACHED);
+	if (flags & O_DSYNC)
+		ion_flags = ION_SET_CACHE(UNCACHED);
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, handle);
+	mutex_unlock(&client->lock);
+	if (!valid_handle) {
+		WARN(1, "%s: invalid handle passed to set_flags.\n", __func__);
+		return -EINVAL;
+	}
+
+	buffer = handle->buffer;
+
+	mutex_lock(&buffer->lock);
+	if (ion_validate_buffer_flags(buffer, ion_flags)) {
+		mutex_unlock(&buffer->lock);
+		return -EEXIST;
+	}
+	mutex_unlock(&buffer->lock);
+	return 0;
+}
+
+
+int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	struct dma_buf *dmabuf;
+	bool valid_handle;
+	int fd;
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, handle);
+	mutex_unlock(&client->lock);
+	if (!valid_handle) {
+		WARN(1, "%s: invalid handle passed to share.\n", __func__);
+		return -EINVAL;
+	}
+
+	buffer = handle->buffer;
+	ion_buffer_get(buffer);
+	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+	if (IS_ERR(dmabuf)) {
+		ion_buffer_put(buffer);
+		return PTR_ERR(dmabuf);
+	}
+	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+	if (fd < 0)
+		dma_buf_put(dmabuf);
+
+	return fd;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
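+
+/*
+ * Userspace sketch of the share/import round trip (assumes two processes
+ * each holding an open /dev/ion fd; the dma-buf fd is passed over a unix
+ * socket; error handling omitted):
+ *
+ *	fd_data.handle = handle;
+ *	ioctl(ion_fd, ION_IOC_SHARE, &fd_data);       exporter gets fd_data.fd
+ *	... send fd_data.fd to the peer process ...
+ *	ioctl(peer_ion_fd, ION_IOC_IMPORT, &fd_data); peer gets its own handle
+ */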
+
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+	struct dma_buf *dmabuf;
+	struct ion_buffer *buffer;
+	struct ion_handle *handle;
+
+	dmabuf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(dmabuf))
+		return ERR_PTR(PTR_ERR(dmabuf));
+
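+	/* if this memory came from ion */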
+	if (dmabuf->ops != &dma_buf_ops) {
+		pr_err("%s: can not import dmabuf from another exporter\n",
+		       __func__);
+		dma_buf_put(dmabuf);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = dmabuf->priv;
+
+	mutex_lock(&client->lock);
+	
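+	/* if a handle exists for this buffer just take a reference to it */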
+	handle = ion_handle_lookup(client, buffer);
+	if (!IS_ERR_OR_NULL(handle)) {
+		ion_handle_get(handle);
+		goto end;
+	}
+	handle = ion_handle_create(client, buffer);
+	if (IS_ERR_OR_NULL(handle))
+		goto end;
+	ion_handle_add(client, handle);
+end:
+	mutex_unlock(&client->lock);
+	dma_buf_put(dmabuf);
+	return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf);
+
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct ion_client *client = filp->private_data;
+
+	switch (cmd) {
+	case ION_IOC_ALLOC:
+	{
+		struct ion_allocation_data data;
+
+		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+			return -EFAULT;
+		data.handle = ion_alloc(client, data.len, data.align,
+					     data.flags);
+
+		if (IS_ERR(data.handle))
+			return PTR_ERR(data.handle);
+
+		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+			ion_free(client, data.handle);
+			return -EFAULT;
+		}
+		break;
+	}
+	case ION_IOC_FREE:
+	{
+		struct ion_handle_data data;
+		bool valid;
+
+		if (copy_from_user(&data, (void __user *)arg,
+				   sizeof(struct ion_handle_data)))
+			return -EFAULT;
+		mutex_lock(&client->lock);
+		valid = ion_handle_validate(client, data.handle);
+		mutex_unlock(&client->lock);
+		if (!valid)
+			return -EINVAL;
+		ion_free(client, data.handle);
+		break;
+	}
+	case ION_IOC_MAP:
+	case ION_IOC_SHARE:
+	{
+		struct ion_fd_data data;
+		int ret;
+		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+			return -EFAULT;
+
+		ret = ion_share_set_flags(client, data.handle, filp->f_flags);
+		if (ret)
+			return ret;
+
+		data.fd = ion_share_dma_buf(client, data.handle);
+		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+			return -EFAULT;
+		if (data.fd < 0)
+			return data.fd;
+		break;
+	}
+	case ION_IOC_IMPORT:
+	{
+		struct ion_fd_data data;
+		int ret = 0;
+		if (copy_from_user(&data, (void __user *)arg,
+				   sizeof(struct ion_fd_data)))
+			return -EFAULT;
+		data.handle = ion_import_dma_buf(client, data.fd);
+		if (IS_ERR(data.handle))
+			data.handle = NULL;
+		if (copy_to_user((void __user *)arg, &data,
+				 sizeof(struct ion_fd_data)))
+			return -EFAULT;
+		if (ret < 0)
+			return ret;
+		break;
+	}
+	case ION_IOC_CUSTOM:
+	{
+		struct ion_device *dev = client->dev;
+		struct ion_custom_data data;
+
+		if (!dev->custom_ioctl)
+			return -ENOTTY;
+		if (copy_from_user(&data, (void __user *)arg,
+				sizeof(struct ion_custom_data)))
+			return -EFAULT;
+		return dev->custom_ioctl(client, data.cmd, data.arg);
+	}
+	case ION_IOC_CLEAN_CACHES:
+	case ION_IOC_INV_CACHES:
+	case ION_IOC_CLEAN_INV_CACHES:
+	{
+		struct ion_flush_data data;
+		unsigned long start, end;
+		struct ion_handle *handle = NULL;
+		int ret;
+
+		if (copy_from_user(&data, (void __user *)arg,
+				sizeof(struct ion_flush_data)))
+			return -EFAULT;
+
+		start = (unsigned long) data.vaddr;
+		end = (unsigned long) data.vaddr + data.length;
+
+		if (check_vaddr_bounds(start, end)) {
+			pr_err("%s: virtual address %p is out of bounds\n",
+				__func__, data.vaddr);
+			return -EINVAL;
+		}
+
+		if (!data.handle) {
+			handle = ion_import_dma_buf(client, data.fd);
+			if (IS_ERR(handle)) {
+				pr_info("%s: Could not import handle: %d\n",
+					__func__, (int)handle);
+				return -EINVAL;
+			}
+		}
+
+		ret = ion_do_cache_op(client,
+					data.handle ? data.handle : handle,
+					data.vaddr, data.offset, data.length,
+					cmd);
+
+		if (!data.handle)
+			ion_free(client, handle);
+
+		if (ret < 0)
+			return ret;
+		break;
+
+	}
+	case ION_IOC_GET_FLAGS:
+	{
+		struct ion_flag_data data;
+		int ret;
+		if (copy_from_user(&data, (void __user *)arg,
+				   sizeof(struct ion_flag_data)))
+			return -EFAULT;
+
+		ret = ion_handle_get_flags(client, data.handle, &data.flags);
+		if (ret < 0)
+			return ret;
+		if (copy_to_user((void __user *)arg, &data,
+				 sizeof(struct ion_flag_data)))
+			return -EFAULT;
+		break;
+	}
+	default:
+		return -ENOTTY;
+	}
+	return 0;
+}
+
+static int ion_release(struct inode *inode, struct file *file)
+{
+	struct ion_client *client = file->private_data;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	ion_client_destroy(client);
+	return 0;
+}
+
+static int ion_open(struct inode *inode, struct file *file)
+{
+	struct miscdevice *miscdev = file->private_data;
+	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
+	struct ion_client *client;
+	char debug_name[64];
+	char task_comm[TASK_COMM_LEN];
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	if (current->group_leader->flags & PF_KTHREAD) {
+		snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
+	} else {
+		strcpy(debug_name, get_task_comm(task_comm, current->group_leader));
+	}
+	
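+	/* pass a heap_mask of -1 so this client can allocate from any heap */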
+	client = ion_client_create(dev, -1, debug_name);
+	if (IS_ERR_OR_NULL(client))
+		return PTR_ERR(client);
+	file->private_data = client;
+
+	return 0;
+}
+
+static const struct file_operations ion_fops = {
+	.owner          = THIS_MODULE,
+	.open           = ion_open,
+	.release        = ion_release,
+	.unlocked_ioctl = ion_ioctl,
+};
+
+static size_t ion_debug_heap_total(struct ion_client *client,
+				   enum ion_heap_ids id)
+{
+	size_t size = 0;
+	struct rb_node *n;
+
+	mutex_lock(&client->lock);
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n,
+						     struct ion_handle,
+						     node);
+		if (handle->buffer->heap->id == id)
+			size += handle->buffer->size;
+	}
+	mutex_unlock(&client->lock);
+	return size;
+}
+
+static int ion_debug_find_buffer_owner(const struct ion_client *client,
+				       const struct ion_buffer *buf)
+{
+	struct rb_node *n;
+
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		const struct ion_handle *handle = rb_entry(n,
+						     const struct ion_handle,
+						     node);
+		if (handle->buffer == buf)
+			return 1;
+	}
+	return 0;
+}
+
+static void ion_debug_mem_map_add(struct rb_root *mem_map,
+				  struct mem_map_data *data)
+{
+	struct rb_node **p = &mem_map->rb_node;
+	struct rb_node *parent = NULL;
+	struct mem_map_data *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct mem_map_data, node);
+
+		if (data->addr < entry->addr) {
+			p = &(*p)->rb_left;
+		} else if (data->addr > entry->addr) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_err("%s: mem_map_data already found.", __func__);
+			BUG();
+		}
+	}
+	rb_link_node(&data->node, parent, p);
+	rb_insert_color(&data->node, mem_map);
+}
+
+const char *ion_debug_locate_owner(const struct ion_device *dev,
+					 const struct ion_buffer *buffer)
+{
+	struct rb_node *j;
+	const char *client_name = NULL;
+
+	for (j = rb_first(&dev->clients); j && !client_name;
+			  j = rb_next(j)) {
+		struct ion_client *client = rb_entry(j, struct ion_client,
+						     node);
+		if (ion_debug_find_buffer_owner(client, buffer))
+			client_name = client->name;
+	}
+	return client_name;
+}
+
+void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
+			      struct rb_root *mem_map)
+{
+	struct ion_device *dev = heap->dev;
+	struct rb_node *n;
+
+	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+		struct ion_buffer *buffer =
+				rb_entry(n, struct ion_buffer, node);
+		if (buffer->heap->id == heap->id) {
+			struct mem_map_data *data =
+					kzalloc(sizeof(*data), GFP_KERNEL);
+			if (!data) {
+				seq_printf(s, "ERROR: out of memory. "
+					   "Part of memory map will not be logged\n");
+				break;
+			}
+			if (heap->id == ION_IOMMU_HEAP_ID) {
+				data->addr = (unsigned long)buffer;
+			} else {
+				data->addr = buffer->priv_phys;
+				data->addr_end = buffer->priv_phys + buffer->size-1;
+			}
+			data->size = buffer->size;
+			data->client_name = ion_debug_locate_owner(dev, buffer);
+
+			{
+				
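+				/* find the name of the client that created this buffer */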
+				struct rb_node *p = NULL;
+				struct ion_client *entry = NULL;
+
+				for (p = rb_first(&dev->clients); p && !data->creator_name;
+						p = rb_next(p)) {
+					entry = rb_entry(p, struct ion_client, node);
+					if (entry == buffer->creator)
+						data->creator_name = entry->name;
+				}
+			}
+			ion_debug_mem_map_add(mem_map, data);
+		}
+	}
+}
+
+static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
+{
+	if (mem_map) {
+		struct rb_node *n;
+		while ((n = rb_first(mem_map)) != 0) {
+			struct mem_map_data *data =
+					rb_entry(n, struct mem_map_data, node);
+			rb_erase(&data->node, mem_map);
+			kfree(data);
+		}
+	}
+}
+
+static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
+{
+	if (heap->ops->print_debug) {
+		struct rb_root mem_map = RB_ROOT;
+		ion_debug_mem_map_create(s, heap, &mem_map);
+		heap->ops->print_debug(heap, s, &mem_map);
+		ion_debug_mem_map_destroy(&mem_map);
+	}
+}
+
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+	struct ion_heap *heap = s->private;
+	struct ion_device *dev = heap->dev;
+	struct rb_node *n;
+
+	mutex_lock(&dev->lock);
+	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
+
+	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+		struct ion_client *client = rb_entry(n, struct ion_client,
+						     node);
+		size_t size = ion_debug_heap_total(client, heap->id);
+		if (!size)
+			continue;
+		if (client->task) {
+			char task_comm[TASK_COMM_LEN];
+
+			get_task_comm(task_comm, client->task);
+			seq_printf(s, "%16.s %16u %16u\n", task_comm,
+				   client->pid, size);
+		} else {
+			seq_printf(s, "%16.s %16u %16u\n", client->name,
+				   client->pid, size);
+		}
+	}
+	ion_heap_print_debug(s, heap);
+	mutex_unlock(&dev->lock);
+	return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+	.open = ion_debug_heap_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+{
+	struct rb_node **p = &dev->heaps.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_heap *entry;
+
+	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+	    !heap->ops->unmap_dma)
+		pr_err("%s: can not add heap with invalid ops struct.\n",
+		       __func__);
+
+	heap->dev = dev;
+	mutex_lock(&dev->lock);
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_heap, node);
+
+		if (heap->id < entry->id) {
+			p = &(*p)->rb_left;
+		} else if (heap->id > entry->id) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_err("%s: can not insert multiple heaps with "
+				"id %d\n", __func__, heap->id);
+			goto end;
+		}
+	}
+
+	rb_link_node(&heap->node, parent, p);
+	rb_insert_color(&heap->node, &dev->heaps);
+	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
+			    &debug_heap_fops);
+end:
+	mutex_unlock(&dev->lock);
+}
+
+int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
+			void *data)
+{
+	struct rb_node *n;
+	int ret_val = 0;
+
+	mutex_lock(&dev->lock);
+	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
+		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
+		if (heap->type != ION_HEAP_TYPE_CP)
+			continue;
+		if (ION_HEAP(heap->id) != heap_id)
+			continue;
+		if (heap->ops->secure_heap)
+			ret_val = heap->ops->secure_heap(heap, version, data);
+		else
+			ret_val = -EINVAL;
+		break;
+	}
+	mutex_unlock(&dev->lock);
+	return ret_val;
+}
+EXPORT_SYMBOL(ion_secure_heap);
+
+int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
+			void *data)
+{
+	struct rb_node *n;
+	int ret_val = 0;
+
+	mutex_lock(&dev->lock);
+	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
+		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
+		if (heap->type != ION_HEAP_TYPE_CP)
+			continue;
+		if (ION_HEAP(heap->id) != heap_id)
+			continue;
+		if (heap->ops->unsecure_heap)
+			ret_val = heap->ops->unsecure_heap(heap, version, data);
+		else
+			ret_val = -EINVAL;
+		break;
+	}
+	mutex_unlock(&dev->lock);
+	return ret_val;
+}
+EXPORT_SYMBOL(ion_unsecure_heap);
+
+static int ion_debug_leak_show(struct seq_file *s, void *unused)
+{
+	struct ion_device *dev = s->private;
+	struct rb_node *n;
+	struct rb_node *n2;
+
+	
+	seq_printf(s, "%16.s %12.s %16.s %16.s %16.s\n", "buffer", "physical", "heap", "size",
+		"ref cnt");
+	mutex_lock(&dev->lock);
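+	/* mark all buffers as 1 */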
+	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
+						     node);
+
+		buf->marked = 1;
+	}
+
+	
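+	/* now clear the mark on every buffer still referenced by a handle */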
+	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+		struct ion_client *client = rb_entry(n, struct ion_client,
+						     node);
+
+		mutex_lock(&client->lock);
+		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
+			struct ion_handle *handle = rb_entry(n2,
+						struct ion_handle, node);
+
+			handle->buffer->marked = 0;
+
+		}
+		mutex_unlock(&client->lock);
+
+	}
+
+	
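+	/* anything still marked as 1 has no client handle: a leaked fd holds it */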
+	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
+						     node);
+		enum ion_heap_type type = buf->heap->type;
+
+		if (buf->marked == 1) {
+			seq_printf(s, "%16.x", (int)buf);
+
+			if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
+				type == ION_HEAP_TYPE_CARVEOUT ||
+				type == ION_HEAP_TYPE_CP)
+				seq_printf(s, " %12lx", buf->priv_phys);
+			else
+				seq_printf(s, " %12s", "N/A");
+
+			seq_printf(s, " %16.s %16.x %16.d\n",
+				buf->heap->name, buf->size,
+				atomic_read(&buf->ref.refcount));
+		}
+	}
+	mutex_unlock(&dev->lock);
+	return 0;
+}
+
+static int ion_debug_leak_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_leak_show, inode->i_private);
+}
+
+static const struct file_operations debug_leak_fops = {
+	.open = ion_debug_leak_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+
+struct ion_device *ion_device_create(long (*custom_ioctl)
+				     (struct ion_client *client,
+				      unsigned int cmd,
+				      unsigned long arg))
+{
+	struct ion_device *idev;
+	int ret;
+
+	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+	if (!idev)
+		return ERR_PTR(-ENOMEM);
+
+	idev->dev.minor = MISC_DYNAMIC_MINOR;
+	idev->dev.name = "ion";
+	idev->dev.fops = &ion_fops;
+	idev->dev.parent = NULL;
+	ret = misc_register(&idev->dev);
+	if (ret) {
+		pr_err("ion: failed to register misc device.\n");
+		kfree(idev);
+		return ERR_PTR(ret);
+	}
+
+	idev->debug_root = debugfs_create_dir("ion", NULL);
+	if (IS_ERR_OR_NULL(idev->debug_root))
+		pr_err("ion: failed to create debug files.\n");
+
+	idev->custom_ioctl = custom_ioctl;
+	idev->buffers = RB_ROOT;
+	mutex_init(&idev->lock);
+	idev->heaps = RB_ROOT;
+	idev->clients = RB_ROOT;
+	debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
+			    &debug_leak_fops);
+	return idev;
+}
+
+void ion_device_destroy(struct ion_device *dev)
+{
+	misc_deregister(&dev->dev);
+	
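+	/* XXX need to free the heaps and clients? */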
+	kfree(dev);
+}
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+	int i, ret;
+
+	for (i = 0; i < data->nr; i++) {
+		if (data->heaps[i].size == 0)
+			continue;
+		ret = memblock_reserve(data->heaps[i].base,
+				       data->heaps[i].size);
+		if (ret)
+			pr_err("memblock reserve of %x@%lx failed\n",
+			       data->heaps[i].size,
+			       data->heaps[i].base);
+	}
+}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
new file mode 100644
index 0000000..5f7fe37
--- /dev/null
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -0,0 +1,497 @@
+/*
+ * drivers/gpu/ion/ion_carveout_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/iommu.h>
+#include <linux/seq_file.h>
+#include "ion_priv.h"
+
+#include <mach/iommu_domains.h>
+#include <asm/mach/map.h>
+#include <asm/cacheflush.h>
+
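+/**
+ * struct ion_carveout_heap - carveout heap bookkeeping
+ * @heap:		the generic ion heap
+ * @pool:		gen_pool handing out ranges from the carveout
+ * @base:		physical base address of the carveout
+ * @allocated_bytes:	bytes currently handed out
+ * @total_size:		total size of the carveout
+ * @request_region:	optional hook called before the first mapping
+ * @release_region:	optional hook called after the last unmapping
+ * @map_count:		number of outstanding mappings
+ * @bus_id:		cookie passed to the request/release hooks
+ * @has_outer_cache:	whether outer cache maintenance is needed
+ */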
+struct ion_carveout_heap {
+	struct ion_heap heap;
+	struct gen_pool *pool;
+	ion_phys_addr_t base;
+	unsigned long allocated_bytes;
+	unsigned long total_size;
+	int (*request_region)(void *);
+	int (*release_region)(void *);
+	atomic_t map_count;
+	void *bus_id;
+	unsigned int has_outer_cache;
+};
+
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+				      unsigned long size,
+				      unsigned long align)
+{
+	struct ion_carveout_heap *carveout_heap =
+		container_of(heap, struct ion_carveout_heap, heap);
+	unsigned long offset = gen_pool_alloc_aligned(carveout_heap->pool,
+							size, ilog2(align));
+
+	if (!offset) {
+		if ((carveout_heap->total_size -
+		      carveout_heap->allocated_bytes) >= size)
+			pr_debug("%s: heap %s has enough memory (%lx) but"
+				" the allocation of size %lx still failed."
+				" Memory is probably fragmented.",
+				__func__, heap->name,
+				carveout_heap->total_size -
+				carveout_heap->allocated_bytes, size);
+		return ION_CARVEOUT_ALLOCATE_FAIL;
+	}
+
+	carveout_heap->allocated_bytes += size;
+	return offset;
+}
+
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+		       unsigned long size)
+{
+	struct ion_carveout_heap *carveout_heap =
+		container_of(heap, struct ion_carveout_heap, heap);
+
+	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+		return;
+	gen_pool_free(carveout_heap->pool, addr, size);
+	carveout_heap->allocated_bytes -= size;
+}
+
+static int ion_carveout_heap_phys(struct ion_heap *heap,
+				  struct ion_buffer *buffer,
+				  ion_phys_addr_t *addr, size_t *len)
+{
+	*addr = buffer->priv_phys;
+	*len = buffer->size;
+	return 0;
+}
+
+static int ion_carveout_heap_allocate(struct ion_heap *heap,
+				      struct ion_buffer *buffer,
+				      unsigned long size, unsigned long align,
+				      unsigned long flags)
+{
+	buffer->priv_phys = ion_carveout_allocate(heap, size, align);
+	return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
+}
+
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_heap *heap = buffer->heap;
+
+	ion_carveout_free(heap, buffer->priv_phys, buffer->size);
+	buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
+}
+
+struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+					      struct ion_buffer *buffer)
+{
+	struct sg_table *table;
+	int ret;
+
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret)
+		goto err0;
+
+	table->sgl->length = buffer->size;
+	table->sgl->offset = 0;
+	table->sgl->dma_address = buffer->priv_phys;
+
+	return table;
+
+err0:
+	kfree(table);
+	return ERR_PTR(ret);
+}
+
+void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+				 struct ion_buffer *buffer)
+{
+	if (buffer->sg_table)
+		sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
+	buffer->sg_table = NULL;
+}
+
+static int ion_carveout_request_region(struct ion_carveout_heap *carveout_heap)
+{
+	int ret_value = 0;
+	if (atomic_inc_return(&carveout_heap->map_count) == 1) {
+		if (carveout_heap->request_region) {
+			ret_value = carveout_heap->request_region(
+						carveout_heap->bus_id);
+			if (ret_value) {
+				pr_err("Unable to request SMI region");
+				atomic_dec(&carveout_heap->map_count);
+			}
+		}
+	}
+	return ret_value;
+}
+
+static int ion_carveout_release_region(struct ion_carveout_heap *carveout_heap)
+{
+	int ret_value = 0;
+	if (atomic_dec_and_test(&carveout_heap->map_count)) {
+		if (carveout_heap->release_region) {
+			ret_value = carveout_heap->release_region(
+						carveout_heap->bus_id);
+			if (ret_value)
+				pr_err("Unable to release SMI region");
+		}
+	}
+	return ret_value;
+}
+
+void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
+				   struct ion_buffer *buffer)
+{
+	struct ion_carveout_heap *carveout_heap =
+		container_of(heap, struct ion_carveout_heap, heap);
+	void *ret_value;
+
+	if (ion_carveout_request_region(carveout_heap))
+		return NULL;
+
+	if (ION_IS_CACHED(buffer->flags))
+		ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
+	else
+		ret_value = ioremap(buffer->priv_phys, buffer->size);
+
+	if (!ret_value)
+		ion_carveout_release_region(carveout_heap);
+	return ret_value;
+}
+
+void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
+				    struct ion_buffer *buffer)
+{
+	struct ion_carveout_heap *carveout_heap =
+		container_of(heap, struct ion_carveout_heap, heap);
+
+	__arm_iounmap(buffer->vaddr);
+	buffer->vaddr = NULL;
+
+	ion_carveout_release_region(carveout_heap);
+	return;
+}
+
+int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+			       struct vm_area_struct *vma)
+{
+	struct ion_carveout_heap *carveout_heap =
+		container_of(heap, struct ion_carveout_heap, heap);
+	int ret_value = 0;
+
+	if (ion_carveout_request_region(carveout_heap))
+		return -EINVAL;
+
+	if (!ION_IS_CACHED(buffer->flags))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	ret_value = remap_pfn_range(vma, vma->vm_start,
+			__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
+			vma->vm_end - vma->vm_start,
+			vma->vm_page_prot);
+
+	if (ret_value)
+		ion_carveout_release_region(carveout_heap);
+	return ret_value;
+}
+
+void ion_carveout_heap_unmap_user(struct ion_heap *heap,
+				    struct ion_buffer *buffer)
+{
+	struct ion_carveout_heap *carveout_heap =
+		container_of(heap, struct ion_carveout_heap, heap);
+	ion_carveout_release_region(carveout_heap);
+}
+
+int ion_carveout_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
+			void *vaddr, unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+	struct ion_carveout_heap *carveout_heap =
+	     container_of(heap, struct  ion_carveout_heap, heap);
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+		dmac_clean_range(vaddr, vaddr + length);
+		outer_cache_op = outer_clean_range;
+		break;
+	case ION_IOC_INV_CACHES:
+		dmac_inv_range(vaddr, vaddr + length);
+		outer_cache_op = outer_inv_range;
+		break;
+	case ION_IOC_CLEAN_INV_CACHES:
+		dmac_flush_range(vaddr, vaddr + length);
+		outer_cache_op = outer_flush_range;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (carveout_heap->has_outer_cache) {
+		unsigned long pstart = buffer->priv_phys + offset;
+		outer_cache_op(pstart, pstart + length);
+	}
+	return 0;
+}
+
+static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s,
+				    const struct rb_root *mem_map)
+{
+	struct ion_carveout_heap *carveout_heap =
+		container_of(heap, struct ion_carveout_heap, heap);
+
+	seq_printf(s, "total bytes currently allocated: %lx\n",
+		carveout_heap->allocated_bytes);
+	seq_printf(s, "total heap size: %lx\n", carveout_heap->total_size);
+
+	if (mem_map) {
+		unsigned long base = carveout_heap->base;
+		unsigned long size = carveout_heap->total_size;
+		unsigned long end = base+size;
+		unsigned long last_end = base;
+		struct rb_node *n;
+
+		seq_printf(s, "\nMemory Map\n");
+		seq_printf(s, "%16.s %16.s %14.s %14.s %14.s\n",
+			   "client", "creator", "start address", "end address",
+			   "size (hex)");
+
+		for (n = rb_first(mem_map); n; n = rb_next(n)) {
+			struct mem_map_data *data =
+					rb_entry(n, struct mem_map_data, node);
+			const char *client_name = "(null)";
+			const char *creator_name = "(null)";
+
+			if (last_end < data->addr) {
+				seq_printf(s, "%16.s %16.s %14lx %14lx %14lu (%lx)\n",
+					   "FREE", "NA", last_end, data->addr-1,
+					   data->addr-last_end,
+					   data->addr-last_end);
+			}
+
+			if (data->client_name)
+				client_name = data->client_name;
+
+			if (data->creator_name)
+				creator_name = data->creator_name;
+
+			seq_printf(s, "%16s %16s %14lx %14lx %14lu (%lx)\n",
+				   client_name, creator_name, data->addr,
+				   data->addr_end,
+				   data->size, data->size);
+			last_end = data->addr_end+1;
+		}
+		if (last_end < end) {
+			seq_printf(s, "%16s %16s %14lx %14lx %14lu (%lx)\n", "FREE", "NA",
+				last_end, end-1, end-last_end, end-last_end);
+		}
+	}
+	return 0;
+}
+
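+/*
+ * Carveout buffers are physically contiguous, so a single-entry
+ * scatterlist is enough to describe them to the iommu.
+ */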
+int ion_carveout_heap_map_iommu(struct ion_buffer *buffer,
+					struct ion_iommu_map *data,
+					unsigned int domain_num,
+					unsigned int partition_num,
+					unsigned long align,
+					unsigned long iova_length,
+					unsigned long flags)
+{
+	struct iommu_domain *domain;
+	int ret = 0;
+	unsigned long extra;
+	struct scatterlist *sglist = NULL;
+	int prot = IOMMU_WRITE | IOMMU_READ;
+	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
+
+	data->mapped_size = iova_length;
+
+	if (!msm_use_iommu()) {
+		data->iova_addr = buffer->priv_phys;
+		return 0;
+	}
+
+	extra = iova_length - buffer->size;
+
+	ret = msm_allocate_iova_address(domain_num, partition_num,
+						data->mapped_size, align,
+						&data->iova_addr);
+
+	if (ret)
+		goto out;
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	sglist = vmalloc(sizeof(*sglist));
+	if (!sglist) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	sg_init_table(sglist, 1);
+	sglist->length = buffer->size;
+	sglist->offset = 0;
+	sglist->dma_address = buffer->priv_phys;
+
+	ret = iommu_map_range(domain, data->iova_addr, sglist,
+			      buffer->size, prot);
+	if (ret) {
+		pr_err("%s: could not map %lx in domain %p\n",
+			__func__, data->iova_addr, domain);
+		goto out1;
+	}
+
+	if (extra) {
+		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
+					  SZ_4K, prot);
+		if (ret)
+			goto out2;
+	}
+	vfree(sglist);
+	return ret;
+
+out2:
+	iommu_unmap_range(domain, data->iova_addr, buffer->size);
+out1:
+	vfree(sglist);
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+
+out:
+
+	return ret;
+}
+
+void ion_carveout_heap_unmap_iommu(struct ion_iommu_map *data)
+{
+	unsigned int domain_num;
+	unsigned int partition_num;
+	struct iommu_domain *domain;
+
+	if (!msm_use_iommu())
+		return;
+
+	domain_num = iommu_map_domain(data);
+	partition_num = iommu_map_partition(data);
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+		return;
+	}
+
+	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+
+	return;
+}
+
+static struct ion_heap_ops carveout_heap_ops = {
+	.allocate = ion_carveout_heap_allocate,
+	.free = ion_carveout_heap_free,
+	.phys = ion_carveout_heap_phys,
+	.map_user = ion_carveout_heap_map_user,
+	.map_kernel = ion_carveout_heap_map_kernel,
+	.unmap_user = ion_carveout_heap_unmap_user,
+	.unmap_kernel = ion_carveout_heap_unmap_kernel,
+	.map_dma = ion_carveout_heap_map_dma,
+	.unmap_dma = ion_carveout_heap_unmap_dma,
+	.cache_op = ion_carveout_cache_ops,
+	.print_debug = ion_carveout_print_debug,
+	.map_iommu = ion_carveout_heap_map_iommu,
+	.unmap_iommu = ion_carveout_heap_unmap_iommu,
+};
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_carveout_heap *carveout_heap;
+	int ret;
+
+	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
+	if (!carveout_heap)
+		return ERR_PTR(-ENOMEM);
+
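+	/* Manage the carveout with 4K (order 12) allocation granularity. */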
+	carveout_heap->pool = gen_pool_create(12, -1);
+	if (!carveout_heap->pool) {
+		kfree(carveout_heap);
+		return ERR_PTR(-ENOMEM);
+	}
+	carveout_heap->base = heap_data->base;
+	ret = gen_pool_add(carveout_heap->pool, carveout_heap->base,
+			heap_data->size, -1);
+	if (ret < 0) {
+		gen_pool_destroy(carveout_heap->pool);
+		kfree(carveout_heap);
+		return ERR_PTR(-EINVAL);
+	}
+	carveout_heap->heap.ops = &carveout_heap_ops;
+	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+	carveout_heap->allocated_bytes = 0;
+	carveout_heap->total_size = heap_data->size;
+	carveout_heap->has_outer_cache = heap_data->has_outer_cache;
+
+	if (heap_data->extra_data) {
+		struct ion_co_heap_pdata *extra_data =
+				heap_data->extra_data;
+
+		if (extra_data->setup_region)
+			carveout_heap->bus_id = extra_data->setup_region();
+		if (extra_data->request_region)
+			carveout_heap->request_region =
+					extra_data->request_region;
+		if (extra_data->release_region)
+			carveout_heap->release_region =
+					extra_data->release_region;
+	}
+	return &carveout_heap->heap;
+}
+
+void ion_carveout_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_carveout_heap *carveout_heap =
+	     container_of(heap, struct  ion_carveout_heap, heap);
+
+	gen_pool_destroy(carveout_heap->pool);
+	kfree(carveout_heap);
+}
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
new file mode 100644
index 0000000..d0d6a95
--- /dev/null
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -0,0 +1,1033 @@
+/*
+ * drivers/gpu/ion/ion_cp_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/memory_alloc.h>
+#include <linux/seq_file.h>
+#include <linux/fmem.h>
+#include <linux/iommu.h>
+
+#include <asm/mach/map.h>
+
+#include <mach/msm_memtypes.h>
+#include <mach/scm.h>
+#include <mach/iommu_domains.h>
+
+#include "ion_priv.h"
+
+#include <asm/cacheflush.h>
+
+#include "msm/ion_cp_common.h"
+struct ion_cp_heap {
+	struct ion_heap heap;
+	struct gen_pool *pool;
+	ion_phys_addr_t base;
+	unsigned int permission_type;
+	ion_phys_addr_t secure_base;
+	size_t secure_size;
+	struct mutex lock;
+	unsigned int heap_protected;
+	unsigned long allocated_bytes;
+	unsigned long total_size;
+	int (*request_region)(void *);
+	int (*release_region)(void *);
+	void *bus_id;
+	unsigned long kmap_cached_count;
+	unsigned long kmap_uncached_count;
+	unsigned long umap_count;
+	unsigned long iommu_iova[MAX_DOMAINS];
+	unsigned long iommu_partition[MAX_DOMAINS];
+	int reusable;
+	void *reserved_vrange;
+	int iommu_map_all;
+	int iommu_2x_map_domain;
+	unsigned int has_outer_cache;
+	atomic_t protect_cnt;
+};
+
+enum {
+	HEAP_NOT_PROTECTED = 0,
+	HEAP_PROTECTED = 1,
+};
+
+static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
+			unsigned int permission_type, int version,
+			void *data);
+
+static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
+				unsigned int permission_type, int version,
+				void *data);
+
+static unsigned long ion_cp_get_total_kmap_count(
+					const struct ion_cp_heap *cp_heap)
+{
+	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
+}
+
+static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
+{
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
+	int ret_value = 0;
+
+	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
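+		/*
+		 * A reusable (fmem) heap must be transitioned into C state
+		 * before its contents can be protected.
+		 */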
+		if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+			ret_value = fmem_set_state(FMEM_C_STATE);
+			if (ret_value)
+				goto out;
+		}
+
+		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
+				cp_heap->secure_size, cp_heap->permission_type,
+				version, data);
+		if (ret_value) {
+			pr_err("Failed to protect memory for heap %s - "
+				"error code: %d\n", heap->name, ret_value);
+
+			if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+				if (fmem_set_state(FMEM_T_STATE) != 0)
+					pr_err("%s: unable to transition heap to T-state\n",
+						__func__);
+			}
+			atomic_dec(&cp_heap->protect_cnt);
+		} else {
+			cp_heap->heap_protected = HEAP_PROTECTED;
+			pr_debug("Protected heap %s @ 0x%lx\n",
+				heap->name, cp_heap->base);
+		}
+	}
+out:
+	pr_debug("%s: protect count is %d\n", __func__,
+		atomic_read(&cp_heap->protect_cnt));
+	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
+	return ret_value;
+}
+
+static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
+{
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
+
+	if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
+		int error_code = ion_cp_unprotect_mem(
+			cp_heap->secure_base, cp_heap->secure_size,
+			cp_heap->permission_type, version, data);
+		if (error_code) {
+			pr_err("Failed to un-protect memory for heap %s - "
+				"error code: %d\n", heap->name, error_code);
+		} else  {
+			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
+			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
+				(unsigned int) cp_heap->base);
+
+			if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+				if (fmem_set_state(FMEM_T_STATE) != 0)
+					pr_err("%s: unable to transition heap to T-state",
+						__func__);
+			}
+		}
+	}
+	pr_debug("%s: protect count is %d\n", __func__,
+		atomic_read(&cp_heap->protect_cnt));
+	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
+}
+
+ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
+				      unsigned long size,
+				      unsigned long align,
+				      unsigned long flags)
+{
+	unsigned long offset;
+	unsigned long secure_allocation = flags & ION_SECURE;
+
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
+
+	mutex_lock(&cp_heap->lock);
+	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
+		mutex_unlock(&cp_heap->lock);
+		pr_err("ION cannot allocate un-secure memory from protected"
+			" heap %s\n", heap->name);
+		return ION_CP_ALLOCATE_FAIL;
+	}
+
+	if (secure_allocation &&
+	    (cp_heap->umap_count > 0 || cp_heap->kmap_cached_count > 0)) {
+		mutex_unlock(&cp_heap->lock);
+		pr_err("ION cannot allocate secure memory from heap with "
+			"outstanding mappings: User space: %lu, kernel space "
+			"(cached): %lu\n", cp_heap->umap_count,
+					   cp_heap->kmap_cached_count);
+		return ION_CP_ALLOCATE_FAIL;
+	}
+
+	if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+		if (fmem_set_state(FMEM_C_STATE) != 0) {
+			mutex_unlock(&cp_heap->lock);
+			return ION_RESERVED_ALLOCATE_FAIL;
+		}
+	}
+
+	cp_heap->allocated_bytes += size;
+	mutex_unlock(&cp_heap->lock);
+
+	offset = gen_pool_alloc_aligned(cp_heap->pool,
+					size, ilog2(align));
+
+	if (!offset) {
+		mutex_lock(&cp_heap->lock);
+		cp_heap->allocated_bytes -= size;
+		if ((cp_heap->total_size -
+		     cp_heap->allocated_bytes) >= size)
+			pr_debug("%s: heap %s has enough memory (%lx) but"
+				" the allocation of size %lx still failed."
+				" Memory is probably fragmented.\n",
+				__func__, heap->name,
+				cp_heap->total_size -
+				cp_heap->allocated_bytes, size);
+
+		if (cp_heap->reusable && !cp_heap->allocated_bytes &&
+		    cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
+			if (fmem_set_state(FMEM_T_STATE) != 0)
+				pr_err("%s: unable to transition heap to T-state\n",
+					__func__);
+		}
+		mutex_unlock(&cp_heap->lock);
+
+		return ION_CP_ALLOCATE_FAIL;
+	}
+
+	return offset;
+}
+
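+/*
+ * Tear down a heap-wide iommu mapping in 64K pages, including the extra
+ * range on the 2x-mapped domain.
+ */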
+static void iommu_unmap_all(unsigned long domain_num,
+			    struct ion_cp_heap *cp_heap)
+{
+	unsigned long left_to_unmap = cp_heap->total_size;
+	unsigned long page_size = SZ_64K;
+
+	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
+	if (domain) {
+		unsigned long temp_iova = cp_heap->iommu_iova[domain_num];
+
+		while (left_to_unmap) {
+			iommu_unmap(domain, temp_iova, page_size);
+			temp_iova += page_size;
+			left_to_unmap -= page_size;
+		}
+		if (domain_num == cp_heap->iommu_2x_map_domain)
+			msm_iommu_unmap_extra(domain, temp_iova,
+					      cp_heap->total_size, SZ_64K);
+	} else {
+		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
+	}
+}
+
+void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
+		       unsigned long size)
+{
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
+
+	if (addr == ION_CP_ALLOCATE_FAIL)
+		return;
+	gen_pool_free(cp_heap->pool, addr, size);
+
+	mutex_lock(&cp_heap->lock);
+	cp_heap->allocated_bytes -= size;
+
+	if (cp_heap->reusable && !cp_heap->allocated_bytes &&
+	    cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
+		if (fmem_set_state(FMEM_T_STATE) != 0)
+			pr_err("%s: unable to transition heap to T-state\n",
+				__func__);
+	}
+
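+	/* Once the heap is empty, release any heap-wide iommu mappings. */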
+	if (!cp_heap->allocated_bytes) {
+		unsigned int i;
+		for (i = 0; i < MAX_DOMAINS; ++i) {
+			if (cp_heap->iommu_iova[i]) {
+				unsigned long vaddr_len = cp_heap->total_size;
+
+				if (i == cp_heap->iommu_2x_map_domain)
+					vaddr_len <<= 1;
+				iommu_unmap_all(i, cp_heap);
+
+				msm_free_iova_address(cp_heap->iommu_iova[i], i,
+						cp_heap->iommu_partition[i],
+						vaddr_len);
+			}
+			cp_heap->iommu_iova[i] = 0;
+			cp_heap->iommu_partition[i] = 0;
+		}
+	}
+	mutex_unlock(&cp_heap->lock);
+}
+
+static int ion_cp_heap_phys(struct ion_heap *heap,
+				  struct ion_buffer *buffer,
+				  ion_phys_addr_t *addr, size_t *len)
+{
+	*addr = buffer->priv_phys;
+	*len = buffer->size;
+	return 0;
+}
+
+static int ion_cp_heap_allocate(struct ion_heap *heap,
+				      struct ion_buffer *buffer,
+				      unsigned long size, unsigned long align,
+				      unsigned long flags)
+{
+	buffer->priv_phys = ion_cp_allocate(heap, size, align, flags);
+	return buffer->priv_phys == ION_CP_ALLOCATE_FAIL ? -ENOMEM : 0;
+}
+
+static void ion_cp_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_heap *heap = buffer->heap;
+
+	ion_cp_free(heap, buffer->priv_phys, buffer->size);
+	buffer->priv_phys = ION_CP_ALLOCATE_FAIL;
+}
+
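+/*
+ * cp buffers are physically contiguous, so a one-entry sg_table is
+ * enough to describe them.
+ */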
+struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
+{
+	struct sg_table *table;
+	int ret;
+
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret)
+		goto err0;
+
+	table->sgl->length = buffer->size;
+	table->sgl->offset = 0;
+	table->sgl->dma_address = buffer->priv_phys;
+
+	return table;
+err0:
+	kfree(table);
+	return ERR_PTR(ret);
+}
+
+struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
+					      struct ion_buffer *buffer)
+{
+	return ion_cp_heap_create_sg_table(buffer);
+}
+
+void ion_cp_heap_unmap_dma(struct ion_heap *heap,
+				 struct ion_buffer *buffer)
+{
+	if (buffer->sg_table)
+		sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
+	buffer->sg_table = NULL;
+}
+
+static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
+{
+	int ret_value = 0;
+	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
+		if (cp_heap->request_region)
+			ret_value = cp_heap->request_region(cp_heap->bus_id);
+	return ret_value;
+}
+
+static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
+{
+	int ret_value = 0;
+	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
+		if (cp_heap->release_region)
+			ret_value = cp_heap->release_region(cp_heap->bus_id);
+	return ret_value;
+}
+
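+/*
+ * Map an fmem-backed buffer at its offset within the heap's reserved
+ * virtual range, cached or uncached according to the buffer flags.
+ */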
+void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
+				void *virt_base, unsigned long flags)
+{
+	int ret;
+	unsigned int offset = buffer->priv_phys - phys_base;
+	unsigned long start = ((unsigned long)virt_base) + offset;
+	const struct mem_type *type = ION_IS_CACHED(flags) ?
+				get_mem_type(MT_DEVICE_CACHED) :
+				get_mem_type(MT_DEVICE);
+
+	if (phys_base > buffer->priv_phys)
+		return NULL;
+
+	ret = ioremap_pages(start, buffer->priv_phys, buffer->size, type);
+
+	if (!ret)
+		return (void *)start;
+	else
+		return NULL;
+}
+
+void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
+{
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
+	void *ret_value = NULL;
+
+	mutex_lock(&cp_heap->lock);
+	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
+	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
+	      !ION_IS_CACHED(buffer->flags))) {
+
+		if (ion_cp_request_region(cp_heap)) {
+			mutex_unlock(&cp_heap->lock);
+			return NULL;
+		}
+
+		if (cp_heap->reusable) {
+			ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
+				cp_heap->reserved_vrange, buffer->flags);
+
+		} else {
+			if (ION_IS_CACHED(buffer->flags))
+				ret_value = ioremap_cached(buffer->priv_phys,
+							   buffer->size);
+			else
+				ret_value = ioremap(buffer->priv_phys,
+						    buffer->size);
+		}
+
+		if (!ret_value) {
+			ion_cp_release_region(cp_heap);
+		} else {
+			if (ION_IS_CACHED(buffer->flags))
+				++cp_heap->kmap_cached_count;
+			else
+				++cp_heap->kmap_uncached_count;
+		}
+	}
+	mutex_unlock(&cp_heap->lock);
+	return ret_value;
+}
+
+void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
+				    struct ion_buffer *buffer)
+{
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
+
+	if (cp_heap->reusable)
+		unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
+	else
+		__arm_iounmap(buffer->vaddr);
+
+	buffer->vaddr = NULL;
+
+	mutex_lock(&cp_heap->lock);
+	if (ION_IS_CACHED(buffer->flags))
+		--cp_heap->kmap_cached_count;
+	else
+		--cp_heap->kmap_uncached_count;
+	ion_cp_release_region(cp_heap);
+	mutex_unlock(&cp_heap->lock);
+
+	return;
+}
+
+int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+			struct vm_area_struct *vma)
+{
+	int ret_value = -EAGAIN;
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
+
+	mutex_lock(&cp_heap->lock);
+	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
+		if (ion_cp_request_region(cp_heap)) {
+			mutex_unlock(&cp_heap->lock);
+			return -EINVAL;
+		}
+
+		if (!ION_IS_CACHED(buffer->flags))
+			vma->vm_page_prot = pgprot_writecombine(
+							vma->vm_page_prot);
+
+		ret_value = remap_pfn_range(vma, vma->vm_start,
+			__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
+			vma->vm_end - vma->vm_start,
+			vma->vm_page_prot);
+
+		if (ret_value)
+			ion_cp_release_region(cp_heap);
+		else
+			++cp_heap->umap_count;
+	}
+	mutex_unlock(&cp_heap->lock);
+	return ret_value;
+}
+
+void ion_cp_heap_unmap_user(struct ion_heap *heap,
+			struct ion_buffer *buffer)
+{
+	struct ion_cp_heap *cp_heap =
+			container_of(heap, struct ion_cp_heap, heap);
+
+	mutex_lock(&cp_heap->lock);
+	--cp_heap->umap_count;
+	ion_cp_release_region(cp_heap);
+	mutex_unlock(&cp_heap->lock);
+}
+
+int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
+			void *vaddr, unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+	struct ion_cp_heap *cp_heap =
+	     container_of(heap, struct  ion_cp_heap, heap);
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+		dmac_clean_range(vaddr, vaddr + length);
+		outer_cache_op = outer_clean_range;
+		break;
+	case ION_IOC_INV_CACHES:
+		dmac_inv_range(vaddr, vaddr + length);
+		outer_cache_op = outer_inv_range;
+		break;
+	case ION_IOC_CLEAN_INV_CACHES:
+		dmac_flush_range(vaddr, vaddr + length);
+		outer_cache_op = outer_flush_range;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (cp_heap->has_outer_cache) {
+		unsigned long pstart = buffer->priv_phys + offset;
+		outer_cache_op(pstart, pstart + length);
+	}
+	return 0;
+}
+
+static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
+			      const struct rb_root *mem_map)
+{
+	unsigned long total_alloc;
+	unsigned long total_size;
+	unsigned long umap_count;
+	unsigned long kmap_count;
+	unsigned long heap_protected;
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
+
+	mutex_lock(&cp_heap->lock);
+	total_alloc = cp_heap->allocated_bytes;
+	total_size = cp_heap->total_size;
+	umap_count = cp_heap->umap_count;
+	kmap_count = ion_cp_get_total_kmap_count(cp_heap);
+	heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
+	mutex_unlock(&cp_heap->lock);
+
+	seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
+	seq_printf(s, "total heap size: %lx\n", total_size);
+	seq_printf(s, "umapping count: %lx\n", umap_count);
+	seq_printf(s, "kmapping count: %lx\n", kmap_count);
+	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
+	seq_printf(s, "reusable: %s\n", cp_heap->reusable  ? "Yes" : "No");
+
+	if (mem_map) {
+		unsigned long base = cp_heap->base;
+		unsigned long size = cp_heap->total_size;
+		unsigned long end = base+size;
+		unsigned long last_end = base;
+		struct rb_node *n;
+
+		seq_printf(s, "\nMemory Map\n");
+		seq_printf(s, "%16s %16s %14s %14s %14s\n",
+			   "client", "creator", "start address", "end address",
+			   "size (hex)");
+
+		for (n = rb_first(mem_map); n; n = rb_next(n)) {
+			struct mem_map_data *data =
+					rb_entry(n, struct mem_map_data, node);
+			const char *client_name = "(null)";
+			const char *creator_name = "(null)";
+
+			if (last_end < data->addr) {
+				seq_printf(s, "%16s %16s %14lx %14lx %14lu (%lx)\n",
+					   "FREE", "NA", last_end, data->addr-1,
+					   data->addr-last_end,
+					   data->addr-last_end);
+			}
+
+			if (data->client_name)
+				client_name = data->client_name;
+
+			if (data->creator_name)
+				creator_name = data->creator_name;
+
+			seq_printf(s, "%16s %16s %14lx %14lx %14lu (%lx)\n",
+				   client_name, creator_name, data->addr,
+				   data->addr_end,
+				   data->size, data->size);
+			last_end = data->addr_end+1;
+		}
+		if (last_end < end) {
+			seq_printf(s, "%16s %16s %14lx %14lx %14lu (%lx)\n", "FREE", "NA",
+				last_end, end-1, end-last_end, end-last_end);
+		}
+	}
+
+	return 0;
+}
+
+int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
+{
+	int ret_value;
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
+	mutex_lock(&cp_heap->lock);
+	if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
+		ret_value = ion_cp_protect(heap, version, data);
+	} else {
+		pr_err("ION cannot secure heap with outstanding mappings: "
+		       "User space: %lu, kernel space (cached): %lu\n",
+		       cp_heap->umap_count, cp_heap->kmap_cached_count);
+		ret_value = -EINVAL;
+	}
+
+	mutex_unlock(&cp_heap->lock);
+	return ret_value;
+}
+
+int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
+{
+	int ret_value = 0;
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
+	mutex_lock(&cp_heap->lock);
+	ion_cp_unprotect(heap, version, data);
+	mutex_unlock(&cp_heap->lock);
+	return ret_value;
+}
+
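+/*
+ * Map the entire heap into an iommu domain in 64K pages so that later
+ * per-buffer mappings can be resolved by offset arithmetic alone.
+ */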
+static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
+			int partition, unsigned long prot)
+{
+	unsigned long left_to_map = cp_heap->total_size;
+	unsigned long page_size = SZ_64K;
+	int ret_value = 0;
+	unsigned long virt_addr_len = cp_heap->total_size;
+	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
+
+	if (domain_num == cp_heap->iommu_2x_map_domain)
+		virt_addr_len <<= 1;
+
+	if (cp_heap->total_size & (SZ_64K-1)) {
+		pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
+		ret_value = -EINVAL;
+	}
+	if (cp_heap->base & (SZ_64K-1)) {
+		pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
+		ret_value = -EINVAL;
+	}
+	if (!ret_value && domain) {
+		unsigned long temp_phys = cp_heap->base;
+		unsigned long temp_iova;
+
+		ret_value = msm_allocate_iova_address(domain_num, partition,
+						virt_addr_len, SZ_64K,
+						&temp_iova);
+
+		if (ret_value) {
+			pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
+				__func__, domain_num, partition);
+			goto out;
+		}
+		cp_heap->iommu_iova[domain_num] = temp_iova;
+
+		while (left_to_map) {
+			int ret = iommu_map(domain, temp_iova, temp_phys,
+					page_size, prot);
+			if (ret) {
+				pr_err("%s: could not map %lx in domain %p, error: %d\n",
+					__func__, temp_iova, domain, ret);
+				ret_value = -EAGAIN;
+				goto free_iova;
+			}
+			temp_iova += page_size;
+			temp_phys += page_size;
+			left_to_map -= page_size;
+		}
+		if (domain_num == cp_heap->iommu_2x_map_domain)
+			ret_value = msm_iommu_map_extra(domain, temp_iova,
+							cp_heap->total_size,
+							SZ_64K, prot);
+		if (ret_value)
+			goto free_iova;
+	} else {
+		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
+		ret_value = -ENOMEM;
+	}
+	goto out;
+
+free_iova:
+	msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
+			      partition, virt_addr_len);
+out:
+	return ret_value;
+}
+
+static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
+				struct ion_iommu_map *data,
+				unsigned int domain_num,
+				unsigned int partition_num,
+				unsigned long align,
+				unsigned long iova_length,
+				unsigned long flags)
+{
+	struct iommu_domain *domain;
+	int ret = 0;
+	unsigned long extra;
+	struct ion_cp_heap *cp_heap =
+		container_of(buffer->heap, struct ion_cp_heap, heap);
+	int prot = IOMMU_WRITE | IOMMU_READ;
+	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
+
+	data->mapped_size = iova_length;
+
+	if (!msm_use_iommu()) {
+		data->iova_addr = buffer->priv_phys;
+		return 0;
+	}
+
+	if (cp_heap->iommu_iova[domain_num]) {
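+		/* The heap is already mapped as a whole; reuse it by offset. */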
+		unsigned long offset = buffer->priv_phys - cp_heap->base;
+		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
+		return 0;
+	} else if (cp_heap->iommu_map_all) {
+		ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
+		if (!ret) {
+			unsigned long offset =
+					buffer->priv_phys - cp_heap->base;
+			data->iova_addr =
+				cp_heap->iommu_iova[domain_num] + offset;
+			cp_heap->iommu_partition[domain_num] = partition_num;
+			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
+			return 0;
+		} else {
+			cp_heap->iommu_iova[domain_num] = 0;
+			cp_heap->iommu_partition[domain_num] = 0;
+			return ret;
+		}
+	}
+
+	extra = iova_length - buffer->size;
+
+	ret = msm_allocate_iova_address(domain_num, partition_num,
+						data->mapped_size, align,
+						&data->iova_addr);
+
+	if (ret)
+		goto out;
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	ret = iommu_map_range(domain, data->iova_addr, buffer->sg_table->sgl,
+			      buffer->size, prot);
+	if (ret) {
+		pr_err("%s: could not map %lx in domain %p\n",
+			__func__, data->iova_addr, domain);
+		goto out1;
+	}
+
+	if (extra) {
+		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
+					  SZ_4K, prot);
+		if (ret)
+			goto out2;
+	}
+	return ret;
+
+out2:
+	iommu_unmap_range(domain, data->iova_addr, buffer->size);
+out1:
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+out:
+	return ret;
+}
+
+static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
+{
+	unsigned int domain_num;
+	unsigned int partition_num;
+	struct iommu_domain *domain;
+	struct ion_cp_heap *cp_heap =
+		container_of(data->buffer->heap, struct ion_cp_heap, heap);
+
+	if (!msm_use_iommu())
+		return;
+
+	domain_num = iommu_map_domain(data);
+
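+	/*
+	 * Buffers covered by a heap-wide mapping are unmapped when the heap
+	 * empties, not individually.
+	 */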
+	if (cp_heap->iommu_iova[domain_num])
+		return;
+
+	partition_num = iommu_map_partition(data);
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+		return;
+	}
+
+	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+
+	return;
+}
+
+static struct ion_heap_ops cp_heap_ops = {
+	.allocate = ion_cp_heap_allocate,
+	.free = ion_cp_heap_free,
+	.phys = ion_cp_heap_phys,
+	.map_user = ion_cp_heap_map_user,
+	.unmap_user = ion_cp_heap_unmap_user,
+	.map_kernel = ion_cp_heap_map_kernel,
+	.unmap_kernel = ion_cp_heap_unmap_kernel,
+	.map_dma = ion_cp_heap_map_dma,
+	.unmap_dma = ion_cp_heap_unmap_dma,
+	.cache_op = ion_cp_cache_ops,
+	.print_debug = ion_cp_print_debug,
+	.secure_heap = ion_cp_secure_heap,
+	.unsecure_heap = ion_cp_unsecure_heap,
+	.map_iommu = ion_cp_heap_map_iommu,
+	.unmap_iommu = ion_cp_heap_unmap_iommu,
+};
+
+struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_cp_heap *cp_heap;
+	int ret;
+
+	cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
+	if (!cp_heap)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&cp_heap->lock);
+
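+	/* 4K (order 12) minimum allocation granularity. */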
+	cp_heap->pool = gen_pool_create(12, -1);
+	if (!cp_heap->pool)
+		goto free_heap;
+
+	cp_heap->base = heap_data->base;
+	ret = gen_pool_add(cp_heap->pool, cp_heap->base, heap_data->size, -1);
+	if (ret < 0)
+		goto destroy_pool;
+
+	cp_heap->allocated_bytes = 0;
+	cp_heap->umap_count = 0;
+	cp_heap->kmap_cached_count = 0;
+	cp_heap->kmap_uncached_count = 0;
+	cp_heap->total_size = heap_data->size;
+	cp_heap->heap.ops = &cp_heap_ops;
+	cp_heap->heap.type = ION_HEAP_TYPE_CP;
+	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
+	cp_heap->secure_base = cp_heap->base;
+	cp_heap->secure_size = heap_data->size;
+	cp_heap->has_outer_cache = heap_data->has_outer_cache;
+	atomic_set(&cp_heap->protect_cnt, 0);
+	if (heap_data->extra_data) {
+		struct ion_cp_heap_pdata *extra_data =
+				heap_data->extra_data;
+		cp_heap->reusable = extra_data->reusable;
+		cp_heap->reserved_vrange = extra_data->virt_addr;
+		cp_heap->permission_type = extra_data->permission_type;
+		if (extra_data->secure_size) {
+			cp_heap->secure_base = extra_data->secure_base;
+			cp_heap->secure_size = extra_data->secure_size;
+		}
+		if (extra_data->setup_region)
+			cp_heap->bus_id = extra_data->setup_region();
+		if (extra_data->request_region)
+			cp_heap->request_region = extra_data->request_region;
+		if (extra_data->release_region)
+			cp_heap->release_region = extra_data->release_region;
+		cp_heap->iommu_map_all =
+				extra_data->iommu_map_all;
+		cp_heap->iommu_2x_map_domain =
+				extra_data->iommu_2x_map_domain;
+
+	}
+
+	return &cp_heap->heap;
+
+destroy_pool:
+	gen_pool_destroy(cp_heap->pool);
+
+free_heap:
+	kfree(cp_heap);
+
+	return ERR_PTR(-ENOMEM);
+}
+
+void ion_cp_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_cp_heap *cp_heap =
+	     container_of(heap, struct  ion_cp_heap, heap);
+
+	gen_pool_destroy(cp_heap->pool);
+	kfree(cp_heap);
+}
+
+void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
+		unsigned long *size)
+{
+	struct ion_cp_heap *cp_heap =
+	     container_of(heap, struct  ion_cp_heap, heap);
+	*base = cp_heap->base;
+	*size = cp_heap->total_size;
+}
+
+
+#define SCM_CP_LOCK_CMD_ID	0x1
+#define SCM_CP_PROTECT		0x1
+#define SCM_CP_UNPROTECT	0x0
+
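+/* Command buffer passed to the secure channel for v1 protect/unprotect. */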
+struct cp_lock_msg {
+	unsigned int start;
+	unsigned int end;
+	unsigned int permission_type;
+	unsigned char lock;
+} __attribute__ ((__packed__));
+
+static int ion_cp_protect_mem_v1(unsigned int phy_base, unsigned int size,
+			      unsigned int permission_type)
+{
+	struct cp_lock_msg cmd;
+	cmd.start = phy_base;
+	cmd.end = phy_base + size;
+	cmd.permission_type = permission_type;
+	cmd.lock = SCM_CP_PROTECT;
+
+	return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID,
+			&cmd, sizeof(cmd), NULL, 0);
+}
+
+static int ion_cp_unprotect_mem_v1(unsigned int phy_base, unsigned int size,
+				unsigned int permission_type)
+{
+	struct cp_lock_msg cmd;
+	cmd.start = phy_base;
+	cmd.end = phy_base + size;
+	cmd.permission_type = permission_type;
+	cmd.lock = SCM_CP_UNPROTECT;
+
+	return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID,
+			&cmd, sizeof(cmd), NULL, 0);
+}
+
+#define V2_CHUNK_SIZE	SZ_1M
+
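+/*
+ * v2 protection operates on a list of 1M chunks; the list itself must be
+ * physically contiguous because the secure environment reads it by
+ * physical address.
+ */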
+static int ion_cp_change_mem_v2(unsigned int phy_base, unsigned int size,
+			      void *data, int lock)
+{
+	enum cp_mem_usage usage = (enum cp_mem_usage) data;
+	unsigned long *chunk_list;
+	int nchunks;
+	int ret;
+	int i;
+
+	if (usage < 0 || usage >= MAX_USAGE)
+		return -EINVAL;
+
+	if (!IS_ALIGNED(size, V2_CHUNK_SIZE)) {
+		pr_err("%s: heap size is not aligned to %x\n",
+			__func__, V2_CHUNK_SIZE);
+		return -EINVAL;
+	}
+
+	nchunks = size / V2_CHUNK_SIZE;
+
+	chunk_list = allocate_contiguous_ebi(sizeof(unsigned long)*nchunks,
+						SZ_4K, 0);
+	if (!chunk_list)
+		return -ENOMEM;
+
+	for (i = 0; i < nchunks; i++)
+		chunk_list[i] = phy_base + i * V2_CHUNK_SIZE;
+
+	ret = ion_cp_change_chunks_state(memory_pool_node_paddr(chunk_list),
+					nchunks, V2_CHUNK_SIZE, usage, lock);
+
+	free_contiguous_memory(chunk_list);
+	return ret;
+}
+
+static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
+			      unsigned int permission_type, int version,
+			      void *data)
+{
+	switch (version) {
+	case ION_CP_V1:
+		return ion_cp_protect_mem_v1(phy_base, size, permission_type);
+	case ION_CP_V2:
+		return ion_cp_change_mem_v2(phy_base, size, data,
+						SCM_CP_PROTECT);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
+			      unsigned int permission_type, int version,
+			      void *data)
+{
+	switch (version) {
+	case ION_CP_V1:
+		return ion_cp_unprotect_mem_v1(phy_base, size, permission_type);
+	case ION_CP_V2:
+		return ion_cp_change_mem_v2(phy_base, size, data,
+						SCM_CP_UNPROTECT);
+	default:
+		return -EINVAL;
+	}
+}
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
new file mode 100644
index 0000000..6ea49db
--- /dev/null
+++ b/drivers/gpu/ion/ion_heap.c
@@ -0,0 +1,85 @@
+/*
+ * drivers/gpu/ion/ion_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include "ion_priv.h"
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_heap *heap = NULL;
+
+	switch (heap_data->type) {
+	case ION_HEAP_TYPE_SYSTEM_CONTIG:
+		heap = ion_system_contig_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_SYSTEM:
+		heap = ion_system_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_CARVEOUT:
+		heap = ion_carveout_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_IOMMU:
+		heap = ion_iommu_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_CP:
+		heap = ion_cp_heap_create(heap_data);
+		break;
+	default:
+		pr_err("%s: Invalid heap type %d\n", __func__,
+		       heap_data->type);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (IS_ERR_OR_NULL(heap)) {
+		pr_err("%s: error creating heap %s type %d base %lu size %u\n",
+		       __func__, heap_data->name, heap_data->type,
+		       heap_data->base, heap_data->size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	heap->name = heap_data->name;
+	heap->id = heap_data->id;
+	return heap;
+}
+
+void ion_heap_destroy(struct ion_heap *heap)
+{
+	if (!heap)
+		return;
+
+	switch (heap->type) {
+	case ION_HEAP_TYPE_SYSTEM_CONTIG:
+		ion_system_contig_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_SYSTEM:
+		ion_system_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_CARVEOUT:
+		ion_carveout_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_IOMMU:
+		ion_iommu_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_CP:
+		ion_cp_heap_destroy(heap);
+		break;
+	default:
+		pr_err("%s: Invalid heap type %d\n", __func__,
+		       heap->type);
+	}
+}
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
new file mode 100644
index 0000000..5f6780b
--- /dev/null
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/iommu.h>
+#include <linux/pfn.h>
+#include "ion_priv.h"
+
+#include <asm/mach/map.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <mach/iommu_domains.h>
+
+struct ion_iommu_heap {
+	struct ion_heap heap;
+	unsigned int has_outer_cache;
+};
+
+struct ion_iommu_priv_data {
+	struct page **pages;
+	int nrpages;
+	unsigned long size;
+};
+
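+/* Bytes currently allocated from iommu heaps, for debugfs accounting. */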
+static atomic_t v = ATOMIC_INIT(0);
+
+static int ion_iommu_heap_allocate(struct ion_heap *heap,
+				      struct ion_buffer *buffer,
+				      unsigned long size, unsigned long align,
+				      unsigned long flags)
+{
+	int ret = 0, i;
+	struct ion_iommu_priv_data *data = NULL;
+	pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
+	void *ptr = NULL;
+
+	if (msm_use_iommu()) {
+		struct scatterlist *sg;
+		struct sg_table *table;
+		unsigned int i;
+
+		data = kmalloc(sizeof(*data), GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+
+		data->size = PFN_ALIGN(size);
+		data->nrpages = data->size >> PAGE_SHIFT;
+		data->pages = kzalloc(sizeof(struct page *)*data->nrpages,
+				GFP_KERNEL);
+		if (!data->pages) {
+			ret = -ENOMEM;
+			goto err1;
+		}
+
+		table = buffer->sg_table =
+				kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+
+		if (!table) {
+			ret = -ENOMEM;
+			goto err1;
+		}
+		ret = sg_alloc_table(table, data->nrpages, GFP_KERNEL);
+		if (ret)
+			goto err2;
+
+		for_each_sg(table->sgl, sg, table->nents, i) {
+			data->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+			if (!data->pages[i]) {
+				ret = -ENOMEM;
+				goto err3;
+			}
+
+			sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
+		}
+
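+		/*
+		 * Zero the pages and flush them through a temporary kernel
+		 * mapping so no stale data is handed out.
+		 */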
+		ptr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);
+		if (ptr != NULL) {
+			memset(ptr, 0, data->size);
+			dmac_flush_range(ptr, ptr + data->size);
+			vunmap(ptr);
+		} else
+			pr_err("%s: vmap() failed\n", __func__);
+
+		buffer->priv_virt = data;
+		atomic_add(data->size, &v);
+		return 0;
+
+	} else {
+		return -ENOMEM;
+	}
+
+
+err3:
+	sg_free_table(buffer->sg_table);
+err2:
+	kfree(buffer->sg_table);
+	buffer->sg_table = NULL;
+
+	for (i = 0; i < data->nrpages; i++) {
+		if (data->pages[i])
+			__free_page(data->pages[i]);
+	}
+	kfree(data->pages);
+err1:
+	kfree(data);
+	return ret;
+}
+
+static void ion_iommu_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_iommu_priv_data *data = buffer->priv_virt;
+	int i;
+
+	if (!data)
+		return;
+
+	for (i = 0; i < data->nrpages; i++)
+		__free_page(data->pages[i]);
+
+	atomic_sub(data->size, &v);
+
+	kfree(data->pages);
+	kfree(data);
+}
+
+int ion_iommu_heap_dump_size(void)
+{
+	int ret = atomic_read(&v);
+	return ret;
+}
+
+static int ion_iommu_print_debug(struct ion_heap *heap, struct seq_file *s,
+				    const struct rb_root *mem_map)
+{
+	seq_printf(s, "Total bytes currently allocated: %d (%x)\n",
+		atomic_read(&v), atomic_read(&v));
+
+	if (mem_map) {
+		struct rb_node *n;
+
+		seq_printf(s, "\nBuffer Info\n");
+		seq_printf(s, "%16s %16s %14s\n",
+			   "client", "creator", "size (hex)");
+
+		for (n = rb_first(mem_map); n; n = rb_next(n)) {
+			struct mem_map_data *data =
+					rb_entry(n, struct mem_map_data, node);
+			const char *client_name = "(null)";
+			const char *creator_name = "(null)";
+
+			if (data->client_name)
+				client_name = data->client_name;
+
+			if (data->creator_name)
+				creator_name = data->creator_name;
+
+			seq_printf(s, "%16s %16s %14lu (%lx)\n",
+				   client_name, creator_name, data->size, data->size);
+		}
+	}
+	return 0;
+}
+
+void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
+				struct ion_buffer *buffer)
+{
+	struct ion_iommu_priv_data *data = buffer->priv_virt;
+	pgprot_t page_prot = PAGE_KERNEL;
+
+	if (!data)
+		return NULL;
+
+	if (!ION_IS_CACHED(buffer->flags))
+		page_prot = pgprot_noncached(page_prot);
+
+	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);
+
+	return buffer->vaddr;
+}
+
+void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
+				    struct ion_buffer *buffer)
+{
+	if (!buffer->vaddr)
+		return;
+
+	vunmap(buffer->vaddr);
+	buffer->vaddr = NULL;
+}
+
+int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+			       struct vm_area_struct *vma)
+{
+	struct ion_iommu_priv_data *data = buffer->priv_virt;
+	int i;
+	unsigned long curr_addr;
+	if (!data)
+		return -EINVAL;
+
+	if (!ION_IS_CACHED(buffer->flags))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	curr_addr = vma->vm_start;
+	for (i = 0; i < data->nrpages && curr_addr < vma->vm_end; i++) {
+		if (vm_insert_page(vma, curr_addr, data->pages[i])) {
+			return -EINVAL;
+		}
+		curr_addr += PAGE_SIZE;
+	}
+	return 0;
+}
+
+int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
+					struct ion_iommu_map *data,
+					unsigned int domain_num,
+					unsigned int partition_num,
+					unsigned long align,
+					unsigned long iova_length,
+					unsigned long flags)
+{
+	struct iommu_domain *domain;
+	int ret = 0;
+	unsigned long extra;
+	int prot = IOMMU_WRITE | IOMMU_READ;
+	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
+
+	BUG_ON(!msm_use_iommu());
+
+	data->mapped_size = iova_length;
+	extra = iova_length - buffer->size;
+
+	ret = msm_allocate_iova_address(domain_num, partition_num,
+						data->mapped_size, align,
+						&data->iova_addr);
+
+	if (ret)
+		goto out;
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	ret = iommu_map_range(domain, data->iova_addr,
+			      buffer->sg_table->sgl,
+			      buffer->size, prot);
+	if (ret) {
+		pr_err("%s: could not map %lx in domain %p\n",
+			__func__, data->iova_addr, domain);
+		goto out1;
+	}
+
+	if (extra) {
+		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
+					  prot);
+		if (ret)
+			goto out2;
+	}
+	return ret;
+
+out2:
+	iommu_unmap_range(domain, data->iova_addr, buffer->size);
+out1:
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				buffer->size);
+
+out:
+
+	return ret;
+}
+
+void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
+{
+	unsigned int domain_num;
+	unsigned int partition_num;
+	struct iommu_domain *domain;
+
+	BUG_ON(!msm_use_iommu());
+
+	domain_num = iommu_map_domain(data);
+	partition_num = iommu_map_partition(data);
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+		return;
+	}
+
+	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+
+	return;
+}
+
+static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
+			void *vaddr, unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+	struct ion_iommu_heap *iommu_heap =
+	     container_of(heap, struct  ion_iommu_heap, heap);
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+		dmac_clean_range(vaddr, vaddr + length);
+		outer_cache_op = outer_clean_range;
+		break;
+	case ION_IOC_INV_CACHES:
+		dmac_inv_range(vaddr, vaddr + length);
+		outer_cache_op = outer_inv_range;
+		break;
+	case ION_IOC_CLEAN_INV_CACHES:
+		dmac_flush_range(vaddr, vaddr + length);
+		outer_cache_op = outer_flush_range;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (iommu_heap->has_outer_cache) {
+		unsigned long pstart;
+		unsigned int i;
+		struct ion_iommu_priv_data *data = buffer->priv_virt;
+		if (!data)
+			return -ENOMEM;
+
+		for (i = 0; i < data->nrpages; ++i) {
+			pstart = page_to_phys(data->pages[i]);
+			outer_cache_op(pstart, pstart + PAGE_SIZE);
+		}
+	}
+	return 0;
+}
+
+static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
+					      struct ion_buffer *buffer)
+{
+	return buffer->sg_table;
+}
+
+static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
+				 struct ion_buffer *buffer)
+{
+	if (buffer->sg_table)
+		sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
+	buffer->sg_table = NULL;
+}
+
+static struct ion_heap_ops iommu_heap_ops = {
+	.allocate = ion_iommu_heap_allocate,
+	.free = ion_iommu_heap_free,
+	.map_user = ion_iommu_heap_map_user,
+	.map_kernel = ion_iommu_heap_map_kernel,
+	.unmap_kernel = ion_iommu_heap_unmap_kernel,
+	.map_iommu = ion_iommu_heap_map_iommu,
+	.unmap_iommu = ion_iommu_heap_unmap_iommu,
+	.cache_op = ion_iommu_cache_ops,
+	.map_dma = ion_iommu_heap_map_dma,
+	.unmap_dma = ion_iommu_heap_unmap_dma,
+	.print_debug = ion_iommu_print_debug,
+};
+
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_iommu_heap *iommu_heap;
+
+	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
+	if (!iommu_heap)
+		return ERR_PTR(-ENOMEM);
+
+	iommu_heap->heap.ops = &iommu_heap_ops;
+	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
+	iommu_heap->has_outer_cache = heap_data->has_outer_cache;
+
+	return &iommu_heap->heap;
+}
+
+void ion_iommu_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_iommu_heap *iommu_heap =
+	     container_of(heap, struct  ion_iommu_heap, heap);
+
+	kfree(iommu_heap);
+}
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
new file mode 100644
index 0000000..31a0d81
--- /dev/null
+++ b/drivers/gpu/ion/ion_priv.h
@@ -0,0 +1,181 @@
+/*
+ * drivers/gpu/ion/ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_PRIV_H
+#define _ION_PRIV_H
+
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/seq_file.h>
+
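+/*
+ * domain_info[] packs the iommu (partition, domain) pair; the union with
+ * 'key' lets the pair be compared as a single 64-bit lookup key.
+ */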
+enum {
+	DI_PARTITION_NUM = 0,
+	DI_DOMAIN_NUM = 1,
+	DI_MAX,
+};
+
+struct ion_iommu_map {
+	unsigned long iova_addr;
+	struct rb_node node;
+	union {
+		int domain_info[DI_MAX];
+		uint64_t key;
+	};
+	struct ion_buffer *buffer;
+	struct kref ref;
+	int mapped_size;
+	unsigned long flags;
+};
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+struct ion_buffer {
+	struct kref ref;
+	struct rb_node node;
+	struct ion_device *dev;
+	struct ion_heap *heap;
+	struct ion_client *creator;
+	unsigned long flags;
+	size_t size;
+	union {
+		void *priv_virt;
+		ion_phys_addr_t priv_phys;
+	};
+	struct mutex lock;
+	int kmap_cnt;
+	void *vaddr;
+	int dmap_cnt;
+	struct sg_table *sg_table;
+	int umap_cnt;
+	unsigned int iommu_map_cnt;
+	struct rb_root iommu_maps;
+	int marked;
+};
+
+struct ion_heap_ops {
+	int (*allocate) (struct ion_heap *heap,
+			 struct ion_buffer *buffer, unsigned long len,
+			 unsigned long align, unsigned long flags);
+	void (*free) (struct ion_buffer *buffer);
+	int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
+		     ion_phys_addr_t *addr, size_t *len);
+	struct sg_table *(*map_dma) (struct ion_heap *heap,
+					struct ion_buffer *buffer);
+	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
+	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
+			 struct vm_area_struct *vma);
+	void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
+	int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
+			void *vaddr, unsigned int offset,
+			unsigned int length, unsigned int cmd);
+	int (*map_iommu)(struct ion_buffer *buffer,
+				struct ion_iommu_map *map_data,
+				unsigned int domain_num,
+				unsigned int partition_num,
+				unsigned long align,
+				unsigned long iova_length,
+				unsigned long flags);
+	void (*unmap_iommu)(struct ion_iommu_map *data);
+	int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
+			   const struct rb_root *mem_map);
+	int (*secure_heap)(struct ion_heap *heap, int version, void *data);
+	int (*unsecure_heap)(struct ion_heap *heap, int version, void *data);
+};
+
+struct ion_heap {
+	struct rb_node node;
+	struct ion_device *dev;
+	enum ion_heap_type type;
+	struct ion_heap_ops *ops;
+	int id;
+	const char *name;
+};
+
+struct mem_map_data {
+	struct rb_node node;
+	unsigned long addr;
+	unsigned long addr_end;
+	unsigned long size;
+	const char *client_name;
+	const char *creator_name;
+};
+
+#define iommu_map_domain(__m)		((__m)->domain_info[DI_DOMAIN_NUM])
+#define iommu_map_partition(__m)	((__m)->domain_info[DI_PARTITION_NUM])
+
+struct ion_device *ion_device_create(long (*custom_ioctl)
+				     (struct ion_client *client,
+				      unsigned int cmd,
+				      unsigned long arg));
+
+void ion_device_destroy(struct ion_device *dev);
+
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *);
+void ion_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
+void ion_system_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
+void ion_system_contig_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
+void ion_carveout_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
+void ion_iommu_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *);
+void ion_cp_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_reusable_heap_create(struct ion_platform_heap *);
+void ion_reusable_heap_destroy(struct ion_heap *);
+
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+				      unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+		       unsigned long size);
+
+
+struct ion_heap *msm_get_contiguous_heap(void);
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+#define ION_CP_ALLOCATE_FAIL -1
+
+#define ION_RESERVED_ALLOCATE_FAIL -1
+
+void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
+				void *virt_base, unsigned long flags);
+
+int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			void *uaddr, unsigned long offset, unsigned long len,
+			unsigned int cmd);
+
+void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
+			unsigned long *size);
+
+void ion_mem_map_show(struct ion_heap *heap);
+
+#endif /* _ION_PRIV_H */
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
new file mode 100644
index 0000000..299c24c
--- /dev/null
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -0,0 +1,548 @@
+/*
+ * drivers/gpu/ion/ion_system_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/iommu.h>
+#include <linux/seq_file.h>
+#include <mach/iommu_domains.h>
+#include "ion_priv.h"
+#include <mach/memory.h>
+#include <asm/cacheflush.h>
+
+static atomic_t system_heap_allocated;
+static atomic_t system_contig_heap_allocated;
+static unsigned int system_heap_has_outer_cache;
+static unsigned int system_heap_contig_has_outer_cache;
+
+static int ion_system_heap_allocate(struct ion_heap *heap,
+				     struct ion_buffer *buffer,
+				     unsigned long size, unsigned long align,
+				     unsigned long flags)
+{
+	struct sg_table *table;
+	struct scatterlist *sg;
+	int i, j;
+	int npages = PAGE_ALIGN(size) / PAGE_SIZE;
+
+	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+	i = sg_alloc_table(table, npages, GFP_KERNEL);
+	if (i)
+		goto err0;
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		struct page *page;
+		page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+		if (!page)
+			goto err1;
+		sg_set_page(sg, page, PAGE_SIZE, 0);
+	}
+	buffer->priv_virt = table;
+	atomic_add(size, &system_heap_allocated);
+	return 0;
+err1:
+	for_each_sg(table->sgl, sg, i, j)
+		__free_page(sg_page(sg));
+	sg_free_table(table);
+err0:
+	kfree(table);
+	return -ENOMEM;
+}
+
+void ion_system_heap_free(struct ion_buffer *buffer)
+{
+	int i;
+	struct scatterlist *sg;
+	struct sg_table *table = buffer->priv_virt;
+
+	for_each_sg(table->sgl, sg, table->nents, i)
+		__free_page(sg_page(sg));
+	if (buffer->sg_table)
+		sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
+	atomic_sub(buffer->size, &system_heap_allocated);
+}
+
+struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+					 struct ion_buffer *buffer)
+{
+	return buffer->priv_virt;
+}
+
+void ion_system_heap_unmap_dma(struct ion_heap *heap,
+			       struct ion_buffer *buffer)
+{
+	return;
+}
+
+void *ion_system_heap_map_kernel(struct ion_heap *heap,
+				 struct ion_buffer *buffer)
+{
+	if (!ION_IS_CACHED(buffer->flags)) {
+		pr_err("%s: cannot map system heap uncached\n", __func__);
+		return ERR_PTR(-EINVAL);
+	} else {
+		struct scatterlist *sg;
+		int i;
+		void *vaddr;
+		struct sg_table *table = buffer->priv_virt;
+		struct page **pages = kmalloc(
+					sizeof(struct page *) * table->nents,
+					GFP_KERNEL);
+
+		if (!pages)
+			return ERR_PTR(-ENOMEM);
+
+		for_each_sg(table->sgl, sg, table->nents, i)
+			pages[i] = sg_page(sg);
+		vaddr = vmap(pages, table->nents, VM_MAP, PAGE_KERNEL);
+		kfree(pages);
+
+		return vaddr;
+	}
+}
+
+void ion_system_heap_unmap_kernel(struct ion_heap *heap,
+				  struct ion_buffer *buffer)
+{
+	vunmap(buffer->vaddr);
+}
+
+void ion_system_heap_unmap_iommu(struct ion_iommu_map *data)
+{
+	unsigned int domain_num;
+	unsigned int partition_num;
+	struct iommu_domain *domain;
+
+	if (!msm_use_iommu())
+		return;
+
+	domain_num = iommu_map_domain(data);
+	partition_num = iommu_map_partition(data);
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+		return;
+	}
+
+	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+
+	return;
+}
+
+int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+			     struct vm_area_struct *vma)
+{
+	if (!ION_IS_CACHED(buffer->flags)) {
+		pr_err("%s: cannot map system heap uncached\n", __func__);
+		return -EINVAL;
+	} else {
+		struct sg_table *table = buffer->priv_virt;
+		unsigned long addr = vma->vm_start;
+		unsigned long offset = vma->vm_pgoff;
+		struct scatterlist *sg;
+		int i;
+
+		for_each_sg(table->sgl, sg, table->nents, i) {
+			if (offset) {
+				offset--;
+				continue;
+			}
+			if (vm_insert_page(vma, addr, sg_page(sg)))
+				return -EINVAL;
+			addr += PAGE_SIZE;
+		}
+		return 0;
+	}
+}
+
+int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
+			void *vaddr, unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+		dmac_clean_range(vaddr, vaddr + length);
+		outer_cache_op = outer_clean_range;
+		break;
+	case ION_IOC_INV_CACHES:
+		dmac_inv_range(vaddr, vaddr + length);
+		outer_cache_op = outer_inv_range;
+		break;
+	case ION_IOC_CLEAN_INV_CACHES:
+		dmac_flush_range(vaddr, vaddr + length);
+		outer_cache_op = outer_flush_range;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (system_heap_has_outer_cache) {
+		unsigned long pstart;
+		struct sg_table *table = buffer->priv_virt;
+		struct scatterlist *sg;
+		int i;
+		for_each_sg(table->sgl, sg, table->nents, i) {
+			struct page *page = sg_page(sg);
+			pstart = page_to_phys(page);
+			if (!pstart) {
+				WARN(1, "Could not translate virtual address to physical address\n");
+				return -EINVAL;
+			}
+			outer_cache_op(pstart, pstart + PAGE_SIZE);
+		}
+	}
+	return 0;
+}
+
+static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
+				  const struct rb_root *unused)
+{
+	seq_printf(s, "total bytes currently allocated: %lx\n",
+			(unsigned long) atomic_read(&system_heap_allocated));
+
+	return 0;
+}
+
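+/*
+ * Map the buffer into an IOMMU domain/partition: reserve an iova range
+ * of iova_length bytes, map the scatterlist into it, and hand any
+ * extra length beyond buffer->size to msm_iommu_map_extra().  The
+ * error paths unwind the mapping and the iova reservation in reverse.
+ */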
+int ion_system_heap_map_iommu(struct ion_buffer *buffer,
+				struct ion_iommu_map *data,
+				unsigned int domain_num,
+				unsigned int partition_num,
+				unsigned long align,
+				unsigned long iova_length,
+				unsigned long flags)
+{
+	int ret = 0;
+	struct iommu_domain *domain;
+	unsigned long extra;
+	unsigned long extra_iova_addr;
+	struct sg_table *table = buffer->priv_virt;
+	int prot = IOMMU_WRITE | IOMMU_READ;
+	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
+
+	if (!ION_IS_CACHED(flags))
+		return -EINVAL;
+
+	if (!msm_use_iommu())
+		return -EINVAL;
+
+	data->mapped_size = iova_length;
+	extra = iova_length - buffer->size;
+
+	ret = msm_allocate_iova_address(domain_num, partition_num,
+						data->mapped_size, align,
+						&data->iova_addr);
+
+	if (ret)
+		goto out;
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
+			      buffer->size, prot);
+
+	if (ret) {
+		pr_err("%s: could not map %lx in domain %p\n",
+			__func__, data->iova_addr, domain);
+		goto out1;
+	}
+
+	extra_iova_addr = data->iova_addr + buffer->size;
+	if (extra) {
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
+					  prot);
+		if (ret)
+			goto out2;
+	}
+	return ret;
+
+out2:
+	iommu_unmap_range(domain, data->iova_addr, buffer->size);
+out1:
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+out:
+	return ret;
+}
+
+static struct ion_heap_ops vmalloc_ops = {
+	.allocate = ion_system_heap_allocate,
+	.free = ion_system_heap_free,
+	.map_dma = ion_system_heap_map_dma,
+	.unmap_dma = ion_system_heap_unmap_dma,
+	.map_kernel = ion_system_heap_map_kernel,
+	.unmap_kernel = ion_system_heap_unmap_kernel,
+	.map_user = ion_system_heap_map_user,
+	.cache_op = ion_system_heap_cache_ops,
+	.print_debug = ion_system_print_debug,
+	.map_iommu = ion_system_heap_map_iommu,
+	.unmap_iommu = ion_system_heap_unmap_iommu,
+};
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
+{
+	struct ion_heap *heap;
+
+	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+	heap->ops = &vmalloc_ops;
+	heap->type = ION_HEAP_TYPE_SYSTEM;
+	system_heap_has_outer_cache = pheap->has_outer_cache;
+	return heap;
+}
+
+void ion_system_heap_destroy(struct ion_heap *heap)
+{
+	kfree(heap);
+}
+
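+/*
+ * The system contig heap backs each buffer with a single physically
+ * contiguous kzalloc() region, which is what makes the phys() lookup
+ * and the single-entry sg_table below possible.
+ */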
+static int ion_system_contig_heap_allocate(struct ion_heap *heap,
+					   struct ion_buffer *buffer,
+					   unsigned long len,
+					   unsigned long align,
+					   unsigned long flags)
+{
+	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
+	if (!buffer->priv_virt)
+		return -ENOMEM;
+	atomic_add(len, &system_contig_heap_allocated);
+	return 0;
+}
+
+void ion_system_contig_heap_free(struct ion_buffer *buffer)
+{
+	kfree(buffer->priv_virt);
+	atomic_sub(buffer->size, &system_contig_heap_allocated);
+}
+
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+				       struct ion_buffer *buffer,
+				       ion_phys_addr_t *addr, size_t *len)
+{
+	*addr = virt_to_phys(buffer->priv_virt);
+	*len = buffer->size;
+	return 0;
+}
+
+struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+						   struct ion_buffer *buffer)
+{
+	struct sg_table *table;
+	int ret;
+
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret) {
+		kfree(table);
+		return ERR_PTR(ret);
+	}
+	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
+		    0);
+	return table;
+}
+
+int ion_system_contig_heap_map_user(struct ion_heap *heap,
+				    struct ion_buffer *buffer,
+				    struct vm_area_struct *vma)
+{
+	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
+
+	if (ION_IS_CACHED(buffer->flags))
+		return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+	else {
+		pr_err("%s: cannot map system heap uncached\n", __func__);
+		return -EINVAL;
+	}
+}
+
+int ion_system_contig_heap_cache_ops(struct ion_heap *heap,
+			struct ion_buffer *buffer, void *vaddr,
+			unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+		dmac_clean_range(vaddr, vaddr + length);
+		outer_cache_op = outer_clean_range;
+		break;
+	case ION_IOC_INV_CACHES:
+		dmac_inv_range(vaddr, vaddr + length);
+		outer_cache_op = outer_inv_range;
+		break;
+	case ION_IOC_CLEAN_INV_CACHES:
+		dmac_flush_range(vaddr, vaddr + length);
+		outer_cache_op = outer_flush_range;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (system_heap_contig_has_outer_cache) {
+		unsigned long pstart;
+
+		pstart = virt_to_phys(buffer->priv_virt) + offset;
+		if (!pstart) {
+			WARN(1, "Could not do virt to phys translation on %p\n",
+				buffer->priv_virt);
+			return -EINVAL;
+		}
+
+		outer_cache_op(pstart, pstart + length);
+	}
+
+	return 0;
+}
+
+static int ion_system_contig_print_debug(struct ion_heap *heap,
+					 struct seq_file *s,
+					 const struct rb_root *unused)
+{
+	seq_printf(s, "total bytes currently allocated: %lx\n",
+		(unsigned long) atomic_read(&system_contig_heap_allocated));
+
+	return 0;
+}
+
+int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
+				struct ion_iommu_map *data,
+				unsigned int domain_num,
+				unsigned int partition_num,
+				unsigned long align,
+				unsigned long iova_length,
+				unsigned long flags)
+{
+	int ret = 0;
+	struct iommu_domain *domain;
+	unsigned long extra;
+	struct scatterlist *sglist = NULL;
+	struct page *page = NULL;
+	int prot = IOMMU_WRITE | IOMMU_READ;
+	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
+
+	if (!ION_IS_CACHED(flags))
+		return -EINVAL;
+
+	if (!msm_use_iommu()) {
+		data->iova_addr = virt_to_phys(buffer->vaddr);
+		return 0;
+	}
+
+	data->mapped_size = iova_length;
+	extra = iova_length - buffer->size;
+
+	ret = msm_allocate_iova_address(domain_num, partition_num,
+						data->mapped_size, align,
+						&data->iova_addr);
+
+	if (ret)
+		goto out;
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+	page = virt_to_page(buffer->vaddr);
+
+	sglist = vmalloc(sizeof(*sglist));
+	if (!sglist) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	sg_init_table(sglist, 1);
+	sg_set_page(sglist, page, buffer->size, 0);
+
+	ret = iommu_map_range(domain, data->iova_addr, sglist,
+			      buffer->size, prot);
+	if (ret) {
+		pr_err("%s: could not map %lx in domain %p\n",
+			__func__, data->iova_addr, domain);
+		goto out1;
+	}
+
+	if (extra) {
+		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
+					  prot);
+		if (ret)
+			goto out2;
+	}
+	vfree(sglist);
+	return ret;
+out2:
+	iommu_unmap_range(domain, data->iova_addr, buffer->size);
+
+out1:
+	vfree(sglist);
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+						data->mapped_size);
+out:
+	return ret;
+}
+
+static struct ion_heap_ops kmalloc_ops = {
+	.allocate = ion_system_contig_heap_allocate,
+	.free = ion_system_contig_heap_free,
+	.phys = ion_system_contig_heap_phys,
+	.map_dma = ion_system_contig_heap_map_dma,
+	.unmap_dma = ion_system_heap_unmap_dma,
+	.map_kernel = ion_system_heap_map_kernel,
+	.unmap_kernel = ion_system_heap_unmap_kernel,
+	.map_user = ion_system_contig_heap_map_user,
+	.cache_op = ion_system_contig_heap_cache_ops,
+	.print_debug = ion_system_contig_print_debug,
+	.map_iommu = ion_system_contig_heap_map_iommu,
+	.unmap_iommu = ion_system_heap_unmap_iommu,
+};
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
+{
+	struct ion_heap *heap;
+
+	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+	heap->ops = &kmalloc_ops;
+	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+	system_heap_contig_has_outer_cache = pheap->has_outer_cache;
+	return heap;
+}
+
+void ion_system_contig_heap_destroy(struct ion_heap *heap)
+{
+	kfree(heap);
+}
+
diff --git a/drivers/gpu/ion/msm/Makefile b/drivers/gpu/ion/msm/Makefile
new file mode 100644
index 0000000..1893405
--- /dev/null
+++ b/drivers/gpu/ion/msm/Makefile
@@ -0,0 +1 @@
+obj-y += msm_ion.o ion_cp_common.o
diff --git a/drivers/gpu/ion/msm/ion_cp_common.c b/drivers/gpu/ion/msm/ion_cp_common.c
new file mode 100644
index 0000000..b274ba2
--- /dev/null
+++ b/drivers/gpu/ion/msm/ion_cp_common.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2011 Google, Inc
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <mach/scm.h>
+
+#include "ion_cp_common.h"
+
+#define MEM_PROTECT_LOCK_ID	0x05
+
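+/*
+ * Request layout handed to the secure environment through scm_call().
+ * Both structures are packed because they cross an ABI boundary and
+ * must not contain compiler padding.
+ */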
+struct cp2_mem_chunks {
+	unsigned int *chunk_list;
+	unsigned int chunk_list_size;
+	unsigned int chunk_size;
+} __attribute__ ((__packed__));
+
+struct cp2_lock_req {
+	struct cp2_mem_chunks chunks;
+	unsigned int mem_usage;
+	unsigned int lock;
+} __attribute__ ((__packed__));
+
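+/*
+ * Ask the secure environment to lock or unlock a list of memory
+ * chunks for the given content-protection usage.  'chunks' carries
+ * the chunk-list address as an unsigned long; each of the nchunks
+ * entries describes a chunk of chunk_size bytes.
+ */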
+int ion_cp_change_chunks_state(unsigned long chunks, unsigned int nchunks,
+				unsigned int chunk_size,
+				enum cp_mem_usage usage,
+				int lock)
+{
+	struct cp2_lock_req request;
+
+	request.mem_usage = usage;
+	request.lock = lock;
+
+	request.chunks.chunk_list = (unsigned int *)chunks;
+	request.chunks.chunk_list_size = nchunks;
+	request.chunks.chunk_size = chunk_size;
+
+	return scm_call(SCM_SVC_CP, MEM_PROTECT_LOCK_ID,
+			&request, sizeof(request), NULL, 0);
+}
diff --git a/drivers/gpu/ion/msm/ion_cp_common.h b/drivers/gpu/ion/msm/ion_cp_common.h
new file mode 100644
index 0000000..950966d
--- /dev/null
+++ b/drivers/gpu/ion/msm/ion_cp_common.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ION_CP_COMMON_H
+#define ION_CP_COMMON_H
+
+#include <asm-generic/errno-base.h>
+#include <linux/ion.h>
+
+#define ION_CP_V1	1
+#define ION_CP_V2	2
+
+#if defined(CONFIG_ION_MSM)
+int ion_cp_change_chunks_state(unsigned long chunks, unsigned int nchunks,
+			unsigned int chunk_size, enum cp_mem_usage usage,
+			int lock);
+
+#else
+static inline int ion_cp_change_chunks_state(unsigned long chunks,
+			unsigned int nchunks, unsigned int chunk_size,
+			enum cp_mem_usage usage, int lock)
+{
+	return -ENODEV;
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
new file mode 100644
index 0000000..03a33f3
--- /dev/null
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -0,0 +1,351 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/memory_alloc.h>
+#include <linux/fmem.h>
+#include <mach/ion.h>
+#include <mach/msm_memtypes.h>
+#include "../ion_priv.h"
+#include "ion_cp_common.h"
+
+static struct ion_device *idev;
+static int num_heaps;
+static struct ion_heap **heaps;
+
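+/*
+ * Thin MSM wrappers around the core ion API.  Typical in-kernel usage
+ * (illustrative only; the heap-mask macro names depend on the board
+ * file and linux/ion.h):
+ *
+ *	struct ion_client *client =
+ *		msm_ion_client_create(ION_HEAP_SYSTEM_MASK, "mydrv");
+ *	struct ion_handle *handle =
+ *		ion_alloc(client, SZ_4K, SZ_4K, ION_HEAP_SYSTEM_MASK);
+ */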
+struct ion_client *msm_ion_client_create(unsigned int heap_mask,
+					const char *name)
+{
+	return ion_client_create(idev, heap_mask, name);
+}
+EXPORT_SYMBOL(msm_ion_client_create);
+
+int msm_ion_secure_heap(int heap_id)
+{
+	return ion_secure_heap(idev, heap_id, ION_CP_V1, NULL);
+}
+EXPORT_SYMBOL(msm_ion_secure_heap);
+
+int msm_ion_unsecure_heap(int heap_id)
+{
+	return ion_unsecure_heap(idev, heap_id, ION_CP_V1, NULL);
+}
+EXPORT_SYMBOL(msm_ion_unsecure_heap);
+
+int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage)
+{
+	return ion_secure_heap(idev, heap_id, ION_CP_V2, (void *)usage);
+}
+EXPORT_SYMBOL(msm_ion_secure_heap_2_0);
+
+int msm_ion_unsecure_heap_2_0(int heap_id, enum cp_mem_usage usage)
+{
+	return ion_unsecure_heap(idev, heap_id, ION_CP_V2, (void *)usage);
+}
+EXPORT_SYMBOL(msm_ion_unsecure_heap_2_0);
+
+int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			void *vaddr, unsigned long len, unsigned int cmd)
+{
+	return ion_do_cache_op(client, handle, vaddr, 0, len, cmd);
+}
+EXPORT_SYMBOL(msm_ion_do_cache_op);
+
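+/*
+ * Reserve a physically contiguous region for a carveout-style heap
+ * from the requested memory pool (EBI or SMI).  Returns the physical
+ * base, or 0 on failure.
+ */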
+static unsigned long msm_ion_get_base(unsigned long size, int memory_type,
+				    unsigned int align)
+{
+	switch (memory_type) {
+	case ION_EBI_TYPE:
+		return allocate_contiguous_ebi_nomap(size, align);
+	case ION_SMI_TYPE:
+		return allocate_contiguous_memory_nomap(size, MEMTYPE_SMI,
+							align);
+	default:
+		pr_err("%s: Unknown memory type %d\n", __func__, memory_type);
+		return 0;
+	}
+}
+
+static struct ion_platform_heap *find_heap(const struct ion_platform_heap
+					   heap_data[],
+					   unsigned int nr_heaps,
+					   int heap_id)
+{
+	unsigned int i;
+	for (i = 0; i < nr_heaps; ++i) {
+		const struct ion_platform_heap *heap = &heap_data[i];
+		if (heap->id == heap_id)
+			return (struct ion_platform_heap *) heap;
+	}
+	return NULL;
+}
+
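+/*
+ * Place a carveout heap and its adjacent secure (CP) heap in one
+ * contiguous region: on top of FMEM when the CP heap is reusable,
+ * otherwise from a fresh contiguous allocation sized for both.  The
+ * secure range is recorded as spanning the pair.
+ */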
+static void ion_set_base_address(struct ion_platform_heap *heap,
+			    struct ion_platform_heap *shared_heap,
+			    struct ion_co_heap_pdata *co_heap_data,
+			    struct ion_cp_heap_pdata *cp_data)
+{
+	if (cp_data->reusable) {
+		const struct fmem_data *fmem_info = fmem_get_info();
+
+		if (!fmem_info) {
+			pr_err("fmem info pointer NULL!\n");
+			BUG();
+		}
+
+		heap->base = fmem_info->phys - fmem_info->reserved_size_low;
+		cp_data->virt_addr = fmem_info->virt;
+		pr_info("ION heap %s using FMEM\n", shared_heap->name);
+	} else {
+		heap->base = msm_ion_get_base(heap->size + shared_heap->size,
+						shared_heap->memory_type,
+						co_heap_data->align);
+	}
+	if (heap->base) {
+		shared_heap->base = heap->base + heap->size;
+		cp_data->secure_base = heap->base;
+		cp_data->secure_size = heap->size + shared_heap->size;
+	} else {
+		pr_err("%s: could not get memory for heap %s (id %x)\n",
+			__func__, heap->name, heap->id);
+	}
+}
+
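+/*
+ * For a carveout heap that names an adjacent_mem_id, find the partner
+ * heap and fix up the shared addresses: a FIXED_MIDDLE partner takes
+ * its virtual address (and, if still unset, its secure range) from
+ * FMEM; otherwise the pair is placed via ion_set_base_address().
+ */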
+static void allocate_co_memory(struct ion_platform_heap *heap,
+			       struct ion_platform_heap heap_data[],
+			       unsigned int nr_heaps)
+{
+	struct ion_co_heap_pdata *co_heap_data =
+		(struct ion_co_heap_pdata *) heap->extra_data;
+
+	if (co_heap_data->adjacent_mem_id != INVALID_HEAP_ID) {
+		struct ion_platform_heap *shared_heap =
+			find_heap(heap_data, nr_heaps,
+				  co_heap_data->adjacent_mem_id);
+		if (shared_heap) {
+			struct ion_cp_heap_pdata *cp_data =
+			   (struct ion_cp_heap_pdata *) shared_heap->extra_data;
+			if (cp_data->fixed_position == FIXED_MIDDLE) {
+				const struct fmem_data *fmem_info =
+					fmem_get_info();
+
+				if (!fmem_info) {
+					pr_err("fmem info pointer NULL!\n");
+					BUG();
+				}
+
+				cp_data->virt_addr = fmem_info->virt;
+				if (!cp_data->secure_base) {
+					cp_data->secure_base = heap->base;
+					cp_data->secure_size =
+						heap->size + shared_heap->size;
+				}
+			} else if (!heap->base) {
+				ion_set_base_address(heap, shared_heap,
+					co_heap_data, cp_data);
+			}
+		}
+	}
+}
+
+static void msm_ion_heap_fixup(struct ion_platform_heap heap_data[],
+			       unsigned int nr_heaps)
+{
+	unsigned int i;
+
+	for (i = 0; i < nr_heaps; i++) {
+		struct ion_platform_heap *heap = &heap_data[i];
+		if (heap->type == ION_HEAP_TYPE_CARVEOUT) {
+			if (heap->extra_data)
+				allocate_co_memory(heap, heap_data, nr_heaps);
+		}
+	}
+}
+
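+/*
+ * Resolve the physical base of a heap the board file left unplaced:
+ * CP heaps may sit on (or after) FMEM; anything else falls back to a
+ * contiguous allocation honouring the heap's declared alignment.
+ */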
+static void msm_ion_allocate(struct ion_platform_heap *heap)
+{
+	if (!heap->base && heap->extra_data) {
+		unsigned int align = 0;
+		switch (heap->type) {
+		case ION_HEAP_TYPE_CARVEOUT:
+			align =
+			((struct ion_co_heap_pdata *) heap->extra_data)->align;
+			break;
+		case ION_HEAP_TYPE_CP:
+		{
+			struct ion_cp_heap_pdata *data =
+				(struct ion_cp_heap_pdata *)
+				heap->extra_data;
+			if (data->reusable) {
+				const struct fmem_data *fmem_info =
+					fmem_get_info();
+				heap->base = fmem_info->phys;
+				data->virt_addr = fmem_info->virt;
+				pr_info("ION heap %s using FMEM\n", heap->name);
+			} else if (data->mem_is_fmem) {
+				const struct fmem_data *fmem_info =
+					fmem_get_info();
+				heap->base = fmem_info->phys + fmem_info->size;
+			}
+			align = data->align;
+			break;
+		}
+		default:
+			break;
+		}
+		if (align && !heap->base) {
+			heap->base = msm_ion_get_base(heap->size,
+						      heap->memory_type,
+						      align);
+			if (!heap->base)
+				pr_err("%s: could not get memory for heap %s "
+				   "(id %x)\n", __func__, heap->name, heap->id);
+		}
+	}
+}
+
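+/*
+ * Two heaps overlap when either base falls inside the other's
+ * [base, base + size - 1] range.  Used below to panic early on a
+ * misconfigured board file instead of corrupting memory at runtime.
+ */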
+static int is_heap_overlapping(const struct ion_platform_heap *heap1,
+				const struct ion_platform_heap *heap2)
+{
+	unsigned long heap1_base = heap1->base;
+	unsigned long heap2_base = heap2->base;
+	unsigned long heap1_end = heap1->base + heap1->size - 1;
+	unsigned long heap2_end = heap2->base + heap2->size - 1;
+
+	if (heap1_base == heap2_base)
+		return 1;
+	if (heap1_base < heap2_base && heap1_end >= heap2_base)
+		return 1;
+	if (heap2_base < heap1_base && heap2_end >= heap1_base)
+		return 1;
+	return 0;
+}
+
+static void check_for_heap_overlap(const struct ion_platform_heap heap_list[],
+				   unsigned long nheaps)
+{
+	unsigned long i;
+	unsigned long j;
+
+	for (i = 0; i < nheaps; ++i) {
+		const struct ion_platform_heap *heap1 = &heap_list[i];
+		if (!heap1->base)
+			continue;
+		for (j = i + 1; j < nheaps; ++j) {
+			const struct ion_platform_heap *heap2 = &heap_list[j];
+			if (!heap2->base)
+				continue;
+			if (is_heap_overlapping(heap1, heap2)) {
+				panic("Memory in heap %s overlaps with heap %s\n",
+					heap1->name, heap2->name);
+			}
+		}
+	}
+}
+
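+/*
+ * Probe: fix up co-located heaps, allocate backing memory where
+ * needed, then create each heap and register it with a fresh ion
+ * device.  Heaps that fail to create are skipped rather than failing
+ * the whole probe.
+ */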
+static int msm_ion_probe(struct platform_device *pdev)
+{
+	struct ion_platform_data *pdata = pdev->dev.platform_data;
+	int err;
+	int i;
+
+	num_heaps = pdata->nr;
+
+	heaps = kcalloc(pdata->nr, sizeof(struct ion_heap *), GFP_KERNEL);
+
+	if (!heaps) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	idev = ion_device_create(NULL);
+	if (IS_ERR_OR_NULL(idev)) {
+		err = idev ? PTR_ERR(idev) : -ENOMEM;
+		goto freeheaps;
+	}
+
+	msm_ion_heap_fixup(pdata->heaps, num_heaps);
+
+
+	for (i = 0; i < num_heaps; i++) {
+		struct ion_platform_heap *heap_data = &pdata->heaps[i];
+		msm_ion_allocate(heap_data);
+
+		heap_data->has_outer_cache = pdata->has_outer_cache;
+		heaps[i] = ion_heap_create(heap_data);
+		if (IS_ERR_OR_NULL(heaps[i])) {
+			heaps[i] = NULL;
+			continue;
+		}
+
+		if (heap_data->size)
+			pr_info("ION heap %s created at %lx "
+				"with size %x\n", heap_data->name,
+						  heap_data->base,
+						  heap_data->size);
+		else
+			pr_info("ION heap %s created\n", heap_data->name);
+
+		ion_device_add_heap(idev, heaps[i]);
+	}
+
+	check_for_heap_overlap(pdata->heaps, num_heaps);
+	platform_set_drvdata(pdev, idev);
+	return 0;
+
+freeheaps:
+	kfree(heaps);
+out:
+	return err;
+}
+
+static int msm_ion_remove(struct platform_device *pdev)
+{
+	struct ion_device *idev = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < num_heaps; i++)
+		ion_heap_destroy(heaps[i]);
+
+	ion_device_destroy(idev);
+	kfree(heaps);
+	return 0;
+}
+
+static struct platform_driver msm_ion_driver = {
+	.probe = msm_ion_probe,
+	.remove = msm_ion_remove,
+	.driver = { .name = "ion-msm" }
+};
+
+static int __init msm_ion_init(void)
+{
+	return platform_driver_register(&msm_ion_driver);
+}
+
+static void __exit msm_ion_exit(void)
+{
+	platform_driver_unregister(&msm_ion_driver);
+}
+
+subsys_initcall(msm_ion_init);
+module_exit(msm_ion_exit);
diff --git a/drivers/gpu/ion/tegra/Makefile b/drivers/gpu/ion/tegra/Makefile
new file mode 100644
index 0000000..11cd003
--- /dev/null
+++ b/drivers/gpu/ion/tegra/Makefile
@@ -0,0 +1 @@
+obj-y += tegra_ion.o