Copied CAF 2.5.1 video/GPU genlock and rotator changes [WIP]
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
index c9e8a94..51349f6 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/gpu/ion/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o ion_iommu_heap.o ion_cp_heap.o
+obj-$(CONFIG_CMA) += ion_cma_heap.o
 obj-$(CONFIG_ION_TEGRA) += tegra/
 obj-$(CONFIG_ION_MSM) += msm/
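
The new CMA heap only builds when the kernel's contiguous memory allocator
is available. A minimal defconfig fragment for reference (illustrative;
CONFIG_ION_MSM is the MSM glue directory shown in this Makefile):

	CONFIG_CMA=y
	CONFIG_ION=y
	CONFIG_ION_MSM=y
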
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index c9df909..d005605 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -33,11 +33,20 @@
 #include <linux/uaccess.h>
 #include <linux/debugfs.h>
 #include <linux/dma-buf.h>
+#include <linux/msm_ion.h>
 
 #include <mach/iommu_domains.h>
 #include "ion_priv.h"
 #define DEBUG
 
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev:		the actual misc device
+ * @buffers:	an rb tree of all the existing buffers
+ * @lock:		lock protecting the buffers & heaps trees
+ * @heaps:		list of all the heaps in the system
+ * @user_clients:	list of all the clients created from userspace
+ */
 struct ion_device {
 	struct miscdevice dev;
 	struct rb_root buffers;
@@ -49,6 +58,20 @@
 	struct dentry *debug_root;
 };
 
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node:		node in the tree of all clients
+ * @dev:		backpointer to ion device
+ * @handles:		an rb tree of all the handles in this client
+ * @lock:		lock protecting the tree of handles
+ * @heap_mask:		mask of all supported heaps
+ * @name:		used for debugging
+ * @task:		used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both the tree of handles
+ * as well as the handles themselves, and should be held while modifying either.
+ */
 struct ion_client {
 	struct rb_node node;
 	struct ion_device *dev;
@@ -61,6 +84,18 @@
 	struct dentry *debug_root;
 };
 
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref:		reference count
+ * @client:		back pointer to the client the buffer resides in
+ * @buffer:		pointer to the buffer
+ * @node:		node in the client's handle rbtree
+ * @kmap_cnt:		count of times this client has mapped to kernel
+ * @dmap_cnt:		count of times this client has mapped for dma
+ *
+ * Modifications to node, map_cnt or mapping should be protected by the
+ * lock in the client.  Other fields are never changed after initialization.
+ */
 struct ion_handle {
 	struct kref ref;
 	struct ion_client *client;
@@ -72,24 +107,7 @@
 
 static void ion_iommu_release(struct kref *kref);
 
-static int ion_validate_buffer_flags(struct ion_buffer *buffer,
-					unsigned long flags)
-{
-	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt ||
-		buffer->iommu_map_cnt) {
-		if (buffer->flags != flags) {
-			pr_err("%s: buffer was already mapped with flags %lx,"
-				" cannot map with flags %lx\n", __func__,
-				buffer->flags, flags);
-			return 1;
-		}
-
-	} else {
-		buffer->flags = flags;
-	}
-	return 0;
-}
-
+/* this function should only be called while dev->lock is held */
 static void ion_buffer_add(struct ion_device *dev,
 			   struct ion_buffer *buffer)
 {
@@ -170,6 +188,7 @@
 	return NULL;
 }
 
+/* this function should only be called while dev->lock is held */
 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 				     struct ion_device *dev,
 				     unsigned long len,
@@ -195,6 +214,7 @@
 
 	buffer->dev = dev;
 	buffer->size = len;
+	buffer->flags = flags;
 
 	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
 	if (IS_ERR_OR_NULL(table)) {
@@ -209,6 +229,10 @@
 	return buffer;
 }
 
+/**
+ * Check for delayed IOMMU unmapping. Also unmap any outstanding
+ * mappings which would otherwise have been leaked.
+ */
 static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
 {
 	struct ion_iommu_map *iommu_map;
@@ -229,7 +253,7 @@
 				__func__, iommu_map->domain_info[DI_DOMAIN_NUM],
 				iommu_map->domain_info[DI_PARTITION_NUM]);
 		}
-		
+		/* set ref count to 1 to force release */
 		kref_init(&iommu_map->ref);
 		kref_put(&iommu_map->ref, ion_iommu_release);
 	}
@@ -371,7 +395,8 @@
 }
 
 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
-			     size_t align, unsigned int flags)
+			     size_t align, unsigned int heap_mask,
+			     unsigned int flags)
 {
 	struct rb_node *n;
 	struct ion_handle *handle;
@@ -384,6 +409,12 @@
 
 	dbg_str[0] = '\0';
 
+	/*
+	 * traverse the list of heaps available in this system in priority
+	 * order.  If the heap type is supported by the client and matches the
+	 * request of the caller, allocate from it.  Repeat until an allocation
+	 * has succeeded or all heaps have been tried.
+	 */
 	if (WARN_ON(!len))
 		return ERR_PTR(-EINVAL);
 
@@ -392,14 +423,15 @@
 	mutex_lock(&dev->lock);
 	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
 		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
-		
+		/* if the client doesn't support this heap type */
 		if (!((1 << heap->type) & client->heap_mask))
 			continue;
-		
-		if (!((1 << heap->id) & flags))
+		/* if the caller didn't specify this heap type */
+		if (!((1 << heap->id) & heap_mask))
 			continue;
-		
-		if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP))
+		/* Do not allow un-secure heap if secure is specified */
+		if (secure_allocation &&
+			(heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP))
 			continue;
 		buffer = ion_buffer_create(heap, dev, len, align, flags);
 		if (!IS_ERR_OR_NULL(buffer))
@@ -409,13 +441,13 @@
 			int ret_value = snprintf(&dbg_str[dbg_str_idx],
 						len_left, "%s ", heap->name);
 			if (ret_value >= len_left) {
-				
+				/* overflow */
 				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
 				dbg_str_idx = MAX_DBG_STR_LEN;
 			} else if (ret_value >= 0) {
 				dbg_str_idx += ret_value;
 			} else {
-				
+				/* error */
 				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
 			}
 		}
@@ -433,9 +465,12 @@
 		return ERR_PTR(PTR_ERR(buffer));
 	}
 
-	buffer->creator = client;
 	handle = ion_handle_create(client, buffer);
 
+	/*
+	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
+	 * and ion_handle_create will take a second reference, drop one here
+	 */
 	ion_buffer_put(buffer);
 
 	if (!IS_ERR(handle)) {
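
A caller-side sketch of the new ion_alloc() signature (illustrative, not
part of this patch): the heap selection bits move out of flags into the
dedicated heap_mask argument, so a secure allocation from the multimedia
content-protection heap would now read as below. ION_HEAP() and
ION_CP_MM_HEAP_ID are assumed from msm_ion.h; substitute the heap id for
the target platform.

	struct ion_handle *handle;

	/* heap_mask selects where to allocate, flags select how */
	handle = ion_alloc(client, len, SZ_4K,
			   ION_HEAP(ION_CP_MM_HEAP_ID), ION_SECURE);
	if (IS_ERR_OR_NULL(handle))
		return -ENOMEM;
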
@@ -615,6 +650,10 @@
 		goto out;
 	}
 
+	/*
+	 * If clients don't want a custom iova length, just use whatever
+	 * the buffer size is
+	 */
 	if (!iova_length)
 		iova_length = buffer->size;
 
@@ -717,8 +756,7 @@
 }
 EXPORT_SYMBOL(ion_unmap_iommu);
 
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
-			unsigned long flags)
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
 {
 	struct ion_buffer *buffer;
 	void *vaddr;
@@ -740,11 +778,6 @@
 		return ERR_PTR(-ENODEV);
 	}
 
-	if (ion_validate_buffer_flags(buffer, flags)) {
-		mutex_unlock(&client->lock);
-		return ERR_PTR(-EEXIST);
-	}
-
 	mutex_lock(&buffer->lock);
 	vaddr = ion_handle_kmap_get(handle);
 	mutex_unlock(&buffer->lock);
@@ -766,31 +799,6 @@
 }
 EXPORT_SYMBOL(ion_unmap_kernel);
 
-static int check_vaddr_bounds(unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm = current->active_mm;
-	struct vm_area_struct *vma;
-	int ret = 1;
-
-	if (end < start)
-		goto out;
-
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, start);
-	if (vma && vma->vm_start < end) {
-		if (start < vma->vm_start)
-			goto out_up;
-		if (end > vma->vm_end)
-			goto out_up;
-		ret = 0;
-	}
-
-out_up:
-	up_read(&mm->mmap_sem);
-out:
-	return ret;
-}
-
 int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
 			void *uaddr, unsigned long offset, unsigned long len,
 			unsigned int cmd)
@@ -856,7 +864,7 @@
 
 		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
 			type == ION_HEAP_TYPE_CARVEOUT ||
-			type == ION_HEAP_TYPE_CP)
+			type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
 			seq_printf(s, " : %12lx", handle->buffer->priv_phys);
 		else
 			seq_printf(s, " : %12s", "N/A");
@@ -910,6 +918,8 @@
 	get_task_struct(current->group_leader);
 	task_lock(current->group_leader);
 	pid = task_pid_nr(current->group_leader);
+	/* don't bother to store task struct for kernel threads,
+	   they can't be killed anyway */
 	if (current->group_leader->flags & PF_KTHREAD) {
 		put_task_struct(current->group_leader);
 		task = NULL;
@@ -962,7 +972,6 @@
 						 &debug_client_fops);
 	mutex_unlock(&dev->lock);
 
-	pr_info("%s: create ion_client (%s) at %p\n", __func__, client->name, client);
 	return client;
 }
 
@@ -971,13 +980,11 @@
 	struct ion_device *dev = client->dev;
 	struct rb_node *n;
 
-	pr_info("%s: destroy ion_client %p (%s)\n", __func__, client, client->name);
+	pr_debug("%s: %d\n", __func__, __LINE__);
 	while ((n = rb_first(&client->handles))) {
 		struct ion_handle *handle = rb_entry(n, struct ion_handle,
 						     node);
-		mutex_lock(&client->lock);
 		ion_handle_destroy(&handle->ref);
-		mutex_unlock(&client->lock);
 	}
 	mutex_lock(&dev->lock);
 	if (client->task)
@@ -1112,7 +1119,7 @@
 	}
 
 	mutex_lock(&buffer->lock);
-	
+	/* now map it to userspace */
 	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
 
 	if (ret) {
@@ -1124,6 +1131,10 @@
 		mutex_unlock(&buffer->lock);
 
 		vma->vm_ops = &ion_vm_ops;
+		/*
+		 * move the buffer into the vm_private_data so we can access it
+		 * from vma_open/close
+		 */
 		vma->vm_private_data = buffer;
 	}
 	return ret;
@@ -1200,9 +1211,12 @@
 {
 	struct ion_buffer *buffer;
 	bool valid_handle;
-	unsigned long ion_flags = ION_SET_CACHE(CACHED);
+	unsigned long ion_flags = 0;
 	if (flags & O_DSYNC)
-		ion_flags = ION_SET_CACHE(UNCACHED);
+		ion_flags = ION_SET_UNCACHED(ion_flags);
+	else
+		ion_flags = ION_SET_CACHED(ion_flags);
+
 
 	mutex_lock(&client->lock);
 	valid_handle = ion_handle_validate(client, handle);
@@ -1214,12 +1228,6 @@
 
 	buffer = handle->buffer;
 
-	mutex_lock(&buffer->lock);
-	if (ion_validate_buffer_flags(buffer, ion_flags)) {
-		mutex_unlock(&buffer->lock);
-		return -EEXIST;
-	}
-	mutex_unlock(&buffer->lock);
 	return 0;
 }
 
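
The O_DSYNC check above follows the MSM ION convention that a client
wanting uncached buffers opens the ion device with O_DSYNC; a one-line
userspace sketch, assuming that ABI:

	/* buffers shared through this client get uncached mappings */
	int ion_fd = open("/dev/ion", O_RDONLY | O_DSYNC);
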
@@ -1263,7 +1271,7 @@
 	dmabuf = dma_buf_get(fd);
 	if (IS_ERR_OR_NULL(dmabuf))
 		return ERR_PTR(PTR_ERR(dmabuf));
-	
+	/* if this memory came from ion */
 
 	if (dmabuf->ops != &dma_buf_ops) {
 		pr_err("%s: can not import dmabuf from another exporter\n",
@@ -1274,7 +1282,7 @@
 	buffer = dmabuf->priv;
 
 	mutex_lock(&client->lock);
-	
+	/* if a handle exists for this buffer just take a reference to it */
 	handle = ion_handle_lookup(client, buffer);
 	if (!IS_ERR_OR_NULL(handle)) {
 		ion_handle_get(handle);
@@ -1303,7 +1311,7 @@
 		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
 			return -EFAULT;
 		data.handle = ion_alloc(client, data.len, data.align,
-					     data.flags);
+					     data.heap_mask, data.flags);
 
 		if (IS_ERR(data.handle))
 			return PTR_ERR(data.handle);
@@ -1357,8 +1365,10 @@
 				   sizeof(struct ion_fd_data)))
 			return -EFAULT;
 		data.handle = ion_import_dma_buf(client, data.fd);
-		if (IS_ERR(data.handle))
+		if (IS_ERR(data.handle)) {
+			ret = PTR_ERR(data.handle);
 			data.handle = NULL;
+		}
 		if (copy_to_user((void __user *)arg, &data,
 				 sizeof(struct ion_fd_data)))
 			return -EFAULT;
@@ -1379,65 +1389,17 @@
 		return dev->custom_ioctl(client, data.cmd, data.arg);
 	}
 	case ION_IOC_CLEAN_CACHES:
+		return client->dev->custom_ioctl(client,
+						ION_IOC_CLEAN_CACHES, arg);
 	case ION_IOC_INV_CACHES:
+		return client->dev->custom_ioctl(client,
+						ION_IOC_INV_CACHES, arg);
 	case ION_IOC_CLEAN_INV_CACHES:
-	{
-		struct ion_flush_data data;
-		unsigned long start, end;
-		struct ion_handle *handle = NULL;
-		int ret;
-
-		if (copy_from_user(&data, (void __user *)arg,
-				sizeof(struct ion_flush_data)))
-			return -EFAULT;
-
-		start = (unsigned long) data.vaddr;
-		end = (unsigned long) data.vaddr + data.length;
-
-		if (check_vaddr_bounds(start, end)) {
-			pr_err("%s: virtual address %p is out of bounds\n",
-				__func__, data.vaddr);
-			return -EINVAL;
-		}
-
-		if (!data.handle) {
-			handle = ion_import_dma_buf(client, data.fd);
-			if (IS_ERR(handle)) {
-				pr_info("%s: Could not import handle: %d\n",
-					__func__, (int)handle);
-				return -EINVAL;
-			}
-		}
-
-		ret = ion_do_cache_op(client,
-					data.handle ? data.handle : handle,
-					data.vaddr, data.offset, data.length,
-					cmd);
-
-		if (!data.handle)
-			ion_free(client, handle);
-
-		if (ret < 0)
-			return ret;
-		break;
-
-	}
+		return client->dev->custom_ioctl(client,
+						ION_IOC_CLEAN_INV_CACHES, arg);
 	case ION_IOC_GET_FLAGS:
-	{
-		struct ion_flag_data data;
-		int ret;
-		if (copy_from_user(&data, (void __user *)arg,
-				   sizeof(struct ion_flag_data)))
-			return -EFAULT;
-
-		ret = ion_handle_get_flags(client, data.handle, &data.flags);
-		if (ret < 0)
-			return ret;
-		if (copy_to_user((void __user *)arg, &data,
-				 sizeof(struct ion_flag_data)))
-			return -EFAULT;
-		break;
-	}
+		return client->dev->custom_ioctl(client,
+						ION_IOC_GET_FLAGS, arg);
 	default:
 		return -ENOTTY;
 	}
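
The cache-maintenance and get-flags ioctls above now just forward to the
MSM custom_ioctl handler, so the userspace sequence is unchanged. A sketch
of the expected call (field names taken from struct ion_flush_data as used
in the removed code; the UAPI header is assumed to be msm_ion.h):

	struct ion_flush_data flush = {
		.handle = alloc_data.handle,
		.fd     = shared_fd,
		.vaddr  = mapped_ptr,
		.offset = 0,
		.length = buf_len,
	};

	if (ioctl(ion_fd, ION_IOC_CLEAN_INV_CACHES, &flush) < 0)
		perror("ION_IOC_CLEAN_INV_CACHES");
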
@@ -1459,15 +1421,9 @@
 	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
 	struct ion_client *client;
 	char debug_name[64];
-	char task_comm[TASK_COMM_LEN];
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
-	if (current->group_leader->flags & PF_KTHREAD) {
-		snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
-	} else {
-		strcpy(debug_name, get_task_comm(task_comm, current->group_leader));
-	}
-	
+	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
 	client = ion_client_create(dev, -1, debug_name);
 	if (IS_ERR_OR_NULL(client))
 		return PTR_ERR(client);
@@ -1501,6 +1457,13 @@
 	return size;
 }
 
+/**
+ * Searches through a client's handles to find if the buffer is owned
+ * by this client. Used for debug output.
+ * @param client pointer to candidate owner of buffer
+ * @param buf pointer to buffer that we are trying to find the owner of
+ * @return 1 if found, 0 otherwise
+ */
 static int ion_debug_find_buffer_owner(const struct ion_client *client,
 				       const struct ion_buffer *buf)
 {
@@ -1516,6 +1479,12 @@
 	return 0;
 }
 
+/**
+ * Adds mem_map_data pointer to the tree of mem_map
+ * Used for debug output.
+ * @param mem_map The mem_map tree
+ * @param data The new data to add to the tree
+ */
 static void ion_debug_mem_map_add(struct rb_root *mem_map,
 				  struct mem_map_data *data)
 {
@@ -1540,6 +1509,12 @@
 	rb_insert_color(&data->node, mem_map);
 }
 
+/**
+ * Search for an owner of a buffer by iterating over all ION clients.
+ * @param dev ion device containing pointers to all the clients.
+ * @param buffer pointer to buffer we are trying to find the owner of.
+ * @return name of owner.
+ */
 const char *ion_debug_locate_owner(const struct ion_device *dev,
 					 const struct ion_buffer *buffer)
 {
@@ -1556,6 +1531,12 @@
 	return client_name;
 }
 
+/**
+ * Create a mem_map of the heap.
+ * @param s seq_file to log error message to.
+ * @param heap The heap to create mem_map for.
+ * @param mem_map The mem map to be created.
+ */
 void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
 			      struct rb_root *mem_map)
 {
@@ -1573,32 +1554,19 @@
 					   "Part of memory map will not be logged\n");
 				break;
 			}
-			if (heap->id == ION_IOMMU_HEAP_ID) {
-				data->addr = (unsigned long)buffer;
-			} else {
-				data->addr = buffer->priv_phys;
-				data->addr_end = buffer->priv_phys + buffer->size-1;
-			}
+			data->addr = buffer->priv_phys;
+			data->addr_end = buffer->priv_phys + buffer->size-1;
 			data->size = buffer->size;
 			data->client_name = ion_debug_locate_owner(dev, buffer);
-
-			{
-				
-				struct rb_node *p = NULL;
-				struct ion_client *entry = NULL;
-
-				for (p = rb_first(&dev->clients); p && !data->creator_name;
-						p = rb_next(p)) {
-					entry = rb_entry(p, struct ion_client, node);
-					if (entry == buffer->creator)
-						data->creator_name = entry->name;
-				}
-			}
 			ion_debug_mem_map_add(mem_map, data);
 		}
 	}
 }
 
+/**
+ * Free the memory allocated by ion_debug_mem_map_create
+ * @param mem_map The mem map to free.
+ */
 static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
 {
 	if (mem_map) {
@@ -1612,6 +1580,11 @@
 	}
 }
 
+/**
+ * Print heap debug information.
+ * @param s seq_file to log message to.
+ * @param heap pointer to heap that we will print debug information for.
+ */
 static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
 {
 	if (heap->ops->print_debug) {
@@ -1707,10 +1680,14 @@
 	struct rb_node *n;
 	int ret_val = 0;
 
+	/*
+	 * traverse the list of heaps available in this system
+	 * and find the heap that is specified.
+	 */
 	mutex_lock(&dev->lock);
 	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
 		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
-		if (heap->type != ION_HEAP_TYPE_CP)
+		if (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP)
 			continue;
 		if (ION_HEAP(heap->id) != heap_id)
 			continue;
@@ -1731,10 +1708,14 @@
 	struct rb_node *n;
 	int ret_val = 0;
 
+	/*
+	 * traverse the list of heaps available in this system
+	 * and find the heap that is specified.
+	 */
 	mutex_lock(&dev->lock);
 	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
 		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
-		if (heap->type != ION_HEAP_TYPE_CP)
+		if (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP)
 			continue;
 		if (ION_HEAP(heap->id) != heap_id)
 			continue;
@@ -1755,8 +1736,8 @@
 	struct rb_node *n;
 	struct rb_node *n2;
 
-	
-	seq_printf(s, "%16.s %12.s %16.s %16.s %16.s\n", "buffer", "physical", "heap", "size",
+	/* mark all buffers as 1 */
+	seq_printf(s, "%16.s %16.s %16.s %16.s\n", "buffer", "heap", "size",
 		"ref cnt");
 	mutex_lock(&dev->lock);
 	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
@@ -1766,7 +1747,7 @@
 		buf->marked = 1;
 	}
 
-	
+	/* now see which buffers we can access */
 	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
 		struct ion_client *client = rb_entry(n, struct ion_client,
 						     node);
@@ -1783,26 +1764,15 @@
 
 	}
 
-	
+	/* And anyone still marked as a 1 means a leaked handle somewhere */
 	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
 		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
 						     node);
-		enum ion_heap_type type = buf->heap->type;
 
-		if (buf->marked == 1) {
-			seq_printf(s, "%16.x", (int)buf);
-
-			if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
-				type == ION_HEAP_TYPE_CARVEOUT ||
-				type == ION_HEAP_TYPE_CP)
-				seq_printf(s, " %12lx", buf->priv_phys);
-			else
-				seq_printf(s, " %12s", "N/A");
-
-			seq_printf(s, " %16.s %16.x %16.d\n",
-				buf->heap->name, buf->size,
+		if (buf->marked == 1)
+			seq_printf(s, "%16.x %16.s %16.x %16.d\n",
+				(int)buf, buf->heap->name, buf->size,
 				atomic_read(&buf->ref.refcount));
-		}
 	}
 	mutex_unlock(&dev->lock);
 	return 0;
@@ -1861,7 +1831,7 @@
 void ion_device_destroy(struct ion_device *dev)
 {
 	misc_deregister(&dev->dev);
-	
+	/* XXX need to free the heaps and clients ? */
 	kfree(dev);
 }
 
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 5f7fe37..a808cc9 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion_carveout_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -23,7 +23,6 @@
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
 #include <linux/iommu.h>
 #include <linux/seq_file.h>
 #include "ion_priv.h"
@@ -31,6 +30,7 @@
 #include <mach/iommu_domains.h>
 #include <asm/mach/map.h>
 #include <asm/cacheflush.h>
+#include <linux/msm_ion.h>
 
 struct ion_carveout_heap {
 	struct ion_heap heap;
@@ -286,19 +286,18 @@
 		struct rb_node *n;
 
 		seq_printf(s, "\nMemory Map\n");
-		seq_printf(s, "%16.s %16.s %14.s %14.s %14.s\n",
-			   "client", "creator", "start address", "end address",
+		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+			   "client", "start address", "end address",
 			   "size (hex)");
 
 		for (n = rb_first(mem_map); n; n = rb_next(n)) {
 			struct mem_map_data *data =
 					rb_entry(n, struct mem_map_data, node);
 			const char *client_name = "(null)";
-			const char *creator_name = "(null)";
 
 			if (last_end < data->addr) {
-				seq_printf(s, "%16.s %16.s %14lx %14lx %14lu (%lx)\n",
-					   "FREE", "NA", last_end, data->addr-1,
+				seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
+					   "FREE", last_end, data->addr-1,
 					   data->addr-last_end,
 					   data->addr-last_end);
 			}
@@ -306,17 +305,14 @@
 			if (data->client_name)
 				client_name = data->client_name;
 
-			if (data->creator_name)
-				creator_name = data->creator_name;
-
-			seq_printf(s, "%16.s %16.s %14lx %14lx %14lu (%lx)\n",
-				   client_name, creator_name, data->addr,
+			seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
+				   client_name, data->addr,
 				   data->addr_end,
 				   data->size, data->size);
 			last_end = data->addr_end+1;
 		}
 		if (last_end < end) {
-			seq_printf(s, "%16.s %16.s %14lx %14lx %14lu (%lx)\n", "FREE", "NA",
+			seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
 				last_end, end-1, end-last_end, end-last_end);
 		}
 	}
@@ -361,7 +357,7 @@
 		goto out1;
 	}
 
-	sglist = vmalloc(sizeof(*sglist));
+	sglist = kmalloc(sizeof(*sglist), GFP_KERNEL);
 	if (!sglist)
 		goto out1;
 
@@ -385,13 +381,13 @@
 		if (ret)
 			goto out2;
 	}
-	vfree(sglist);
+	kfree(sglist);
 	return ret;
 
 out2:
 	iommu_unmap_range(domain, data->iova_addr, buffer->size);
 out1:
-	vfree(sglist);
+	kfree(sglist);
 	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
 				data->mapped_size);
 
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
new file mode 100644
index 0000000..bef6b6f
--- /dev/null
+++ b/drivers/gpu/ion/ion_cma_heap.c
@@ -0,0 +1,342 @@
+/*
+ * drivers/gpu/ion/ion_cma_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/ion.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_ion.h>
+#include <mach/iommu_domains.h>
+
+#include <asm/cacheflush.h>
+
+/* for ion_heap_ops structure */
+#include "ion_priv.h"
+
+#define ION_CMA_ALLOCATE_FAILED -1
+
+struct ion_cma_buffer_info {
+	void *cpu_addr;
+	dma_addr_t handle;
+	struct sg_table *table;
+	bool is_cached;
+};
+
+static int cma_heap_has_outer_cache;
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable()
+ * as soon as it becomes available.
+ */
+int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+			void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	struct page *page = virt_to_page(cpu_addr);
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	return 0;
+}
+
+/* ION CMA heap operations functions */
+static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
+			    unsigned long len, unsigned long align,
+			    unsigned long flags)
+{
+	struct device *dev = heap->priv;
+	struct ion_cma_buffer_info *info;
+
+	dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+
+	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
+	if (!info) {
+		dev_err(dev, "Can't allocate buffer info\n");
+		return ION_CMA_ALLOCATE_FAILED;
+	}
+
+	if (!ION_IS_CACHED(flags))
+		info->cpu_addr = dma_alloc_writecombine(dev, len,
+					&(info->handle), 0);
+	else
+		info->cpu_addr = dma_alloc_nonconsistent(dev, len,
+					&(info->handle), 0);
+
+	if (!info->cpu_addr) {
+		dev_err(dev, "Fail to allocate buffer\n");
+		goto err;
+	}
+
+	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!info->table) {
+		dev_err(dev, "Fail to allocate sg table\n");
+		goto err;
+	}
+
+	info->is_cached = ION_IS_CACHED(flags);
+
+	ion_cma_get_sgtable(dev,
+			info->table, info->cpu_addr, info->handle, len);
+
+	/* keep this for memory release */
+	buffer->priv_virt = info;
+	dev_dbg(dev, "Allocate buffer %p\n", buffer);
+	return 0;
+
+err:
+	kfree(info);
+	return ION_CMA_ALLOCATE_FAILED;
+}
+
+static void ion_cma_free(struct ion_buffer *buffer)
+{
+	struct device *dev = buffer->heap->priv;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(dev, "Release buffer %p\n", buffer);
+	/* release memory */
+	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+	/* release sg table */
+	kfree(info->table);
+	kfree(info);
+}
+
+/* return physical address in addr */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+			ion_phys_addr_t *addr, size_t *len)
+{
+	struct device *dev = heap->priv;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
+		info->handle);
+
+	*addr = info->handle;
+	*len = buffer->size;
+
+	return 0;
+}
+
+struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+					 struct ion_buffer *buffer)
+{
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	return info->table;
+}
+
+void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+			       struct ion_buffer *buffer)
+{
+	return;
+}
+
+static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
+			struct vm_area_struct *vma)
+{
+	struct device *dev = buffer->heap->priv;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	if (info->is_cached)
+		return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
+				info->handle, buffer->size);
+	else
+		return dma_mmap_writecombine(dev, vma, info->cpu_addr,
+				info->handle, buffer->size);
+}
+
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+				struct ion_buffer *buffer)
+{
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	return info->cpu_addr;
+}
+
+static void ion_cma_unmap_kernel(struct ion_heap *heap,
+				 struct ion_buffer *buffer)
+{
+	return;
+}
+
+int ion_cma_map_iommu(struct ion_buffer *buffer,
+				struct ion_iommu_map *data,
+				unsigned int domain_num,
+				unsigned int partition_num,
+				unsigned long align,
+				unsigned long iova_length,
+				unsigned long flags)
+{
+	int ret = 0;
+	struct iommu_domain *domain;
+	unsigned long extra;
+	unsigned long extra_iova_addr;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+	struct sg_table *table = info->table;
+	int prot = IOMMU_WRITE | IOMMU_READ;
+
+	data->mapped_size = iova_length;
+
+	if (!msm_use_iommu()) {
+		data->iova_addr = info->handle;
+		return 0;
+	}
+
+	extra = iova_length - buffer->size;
+
+	ret = msm_allocate_iova_address(domain_num, partition_num,
+						data->mapped_size, align,
+						&data->iova_addr);
+
+	if (ret)
+		goto out;
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		ret = -EINVAL;
+		goto out1;
+	}
+
+	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
+				buffer->size, prot);
+
+	if (ret) {
+		pr_err("%s: could not map %lx in domain %p\n",
+			__func__, data->iova_addr, domain);
+		goto out1;
+	}
+
+	extra_iova_addr = data->iova_addr + buffer->size;
+	if (extra) {
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
+						prot);
+		if (ret)
+			goto out2;
+	}
+	return ret;
+
+out2:
+	iommu_unmap_range(domain, data->iova_addr, buffer->size);
+out1:
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+out:
+	return ret;
+}
+
+
+void ion_cma_unmap_iommu(struct ion_iommu_map *data)
+{
+	unsigned int domain_num;
+	unsigned int partition_num;
+	struct iommu_domain *domain;
+
+	if (!msm_use_iommu())
+		return;
+
+	domain_num = iommu_map_domain(data);
+	partition_num = iommu_map_partition(data);
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+		return;
+	}
+
+	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+
+	return;
+}
+
+int ion_cma_cache_ops(struct ion_heap *heap,
+			struct ion_buffer *buffer, void *vaddr,
+			unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+		dmac_clean_range(vaddr, vaddr + length);
+		outer_cache_op = outer_clean_range;
+		break;
+	case ION_IOC_INV_CACHES:
+		dmac_inv_range(vaddr, vaddr + length);
+		outer_cache_op = outer_inv_range;
+		break;
+	case ION_IOC_CLEAN_INV_CACHES:
+		dmac_flush_range(vaddr, vaddr + length);
+		outer_cache_op = outer_flush_range;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (cma_heap_has_outer_cache) {
+		struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+		outer_cache_op(info->handle, info->handle + length);
+	}
+
+	return 0;
+}
+
+static struct ion_heap_ops ion_cma_ops = {
+	.allocate = ion_cma_allocate,
+	.free = ion_cma_free,
+	.map_dma = ion_cma_heap_map_dma,
+	.unmap_dma = ion_cma_heap_unmap_dma,
+	.phys = ion_cma_phys,
+	.map_user = ion_cma_mmap,
+	.map_kernel = ion_cma_map_kernel,
+	.unmap_kernel = ion_cma_unmap_kernel,
+	.map_iommu = ion_cma_map_iommu,
+	.unmap_iommu = ion_cma_unmap_iommu,
+	.cache_op = ion_cma_cache_ops,
+};
+
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+{
+	struct ion_heap *heap;
+
+	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+
+	heap->ops = &ion_cma_ops;
+	/* set the device as the heap's private data; later it will be
+	 * used to make the link with the reserved CMA memory */
+	heap->priv = data->priv;
+	heap->type = ION_HEAP_TYPE_DMA;
+	cma_heap_has_outer_cache = data->has_outer_cache;
+	return heap;
+}
+
+void ion_cma_heap_destroy(struct ion_heap *heap)
+{
+	kfree(heap);
+}
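
A board-file sketch of how this heap is expected to be wired up
(illustrative; the id, name, and backing device are hypothetical): the heap
is declared with type ION_HEAP_TYPE_DMA, and priv carries the struct device
that dma_alloc_writecombine()/dma_alloc_nonconsistent() allocate against,
which ion_heap_create() copies into heap->priv.

	static struct ion_platform_heap board_ion_heaps[] = {
		{
			.id	= ION_CAMERA_HEAP_ID,	/* hypothetical id */
			.type	= ION_HEAP_TYPE_DMA,
			.name	= "camera_cma",
			.priv	= &board_cma_dev.dev,	/* device owning the CMA region */
		},
	};
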
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index d0d6a95..017b756 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion_cp_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -15,19 +15,19 @@
  *
  */
 #include <linux/spinlock.h>
-
+#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/genalloc.h>
 #include <linux/io.h>
-#include <linux/ion.h>
+#include <linux/msm_ion.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
 #include <linux/memory_alloc.h>
 #include <linux/seq_file.h>
 #include <linux/fmem.h>
 #include <linux/iommu.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/mach/map.h>
 
@@ -41,6 +41,39 @@
 #include <asm/cacheflush.h>
 
 #include "msm/ion_cp_common.h"
+/**
+ * struct ion_cp_heap - container for the heap and shared heap data
+ *
+ * @heap:	the heap information structure
+ * @pool:	memory pool to allocate from.
+ * @base:	the base address of the memory pool.
+ * @permission_type:	Identifier for the memory used by SCM for protecting
+ *			and unprotecting memory.
+ * @secure_base:	Base address used when securing a heap that is shared.
+ * @secure_size:	Size used when securing a heap that is shared.
+ * @lock:	mutex to protect shared access.
+ * @heap_protected:	Indicates whether heap has been protected or not.
+ * @allocated_bytes:	the total number of allocated bytes from the pool.
+ * @total_size:	the total size of the memory pool.
+ * @request_region:	function pointer to call when first mapping of memory
+ *			occurs.
+ * @release_region:	function pointer to call when last mapping of memory
+ *			is unmapped.
+ * @bus_id: token used with request/release region.
+ * @kmap_cached_count:	the total number of times this heap has been mapped in
+ *			kernel space (cached).
+ * @kmap_uncached_count: the total number of times this heap has been mapped in
+ *			kernel space (un-cached).
+ * @umap_count:	the total number of times this heap has been mapped in
+ *		user space.
+ * @iommu_iova: saved iova when mapping full heap at once.
+ * @iommu_partition: partition used to map full heap.
+ * @reusable: indicates if the memory should be reused via fmem.
+ * @reserved_vrange: reserved virtual address range for use with fmem
+ * @iommu_map_all:	Indicates whether we should map whole heap into IOMMU.
+ * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
+ * @has_outer_cache:    set to 1 if outer cache is used, 0 otherwise.
+ */
 struct ion_cp_heap {
 	struct ion_heap heap;
 	struct gen_pool *pool;
@@ -52,8 +85,8 @@
 	unsigned int heap_protected;
 	unsigned long allocated_bytes;
 	unsigned long total_size;
-	int (*request_region)(void *);
-	int (*release_region)(void *);
+	int (*heap_request_region)(void *);
+	int (*heap_release_region)(void *);
 	void *bus_id;
 	unsigned long kmap_cached_count;
 	unsigned long kmap_uncached_count;
@@ -66,6 +99,11 @@
 	int iommu_2x_map_domain;
 	unsigned int has_outer_cache;
 	atomic_t protect_cnt;
+	void *cpu_addr;
+	size_t heap_size;
+	dma_addr_t handle;
+	int cma;
+	int disallow_non_secure_allocation;
 };
 
 enum {
@@ -73,6 +111,8 @@
 	HEAP_PROTECTED = 1,
 };
 
+#define DMA_ALLOC_RETRIES	5
+
 static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
 			unsigned int permission_type, int version,
 			void *data);
@@ -81,12 +121,120 @@
 				unsigned int permission_type, int version,
 				void *data);
 
+static int allocate_heap_memory(struct ion_heap *heap)
+{
+	struct device *dev = heap->priv;
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
+	int ret;
+	int tries = 0;
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+
+
+	if (cp_heap->cpu_addr)
+		return 0;
+
+	while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_RETRIES)) {
+		cp_heap->cpu_addr = dma_alloc_attrs(dev,
+						cp_heap->heap_size,
+						&(cp_heap->handle),
+						0,
+						&attrs);
+		if (!cp_heap->cpu_addr)
+			msleep(20);
+	}
+
+	if (!cp_heap->cpu_addr)
+		goto out;
+
+	cp_heap->base = cp_heap->handle;
+
+	cp_heap->pool = gen_pool_create(12, -1);
+	if (!cp_heap->pool)
+		goto out_free;
+
+	ret = gen_pool_add(cp_heap->pool, cp_heap->base,
+				cp_heap->heap_size, -1);
+	if (ret < 0)
+		goto out_pool;
+
+	return 0;
+
+out_pool:
+	gen_pool_destroy(cp_heap->pool);
+out_free:
+	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
+				cp_heap->handle);
+out:
+	return ION_CP_ALLOCATE_FAIL;
+}
+
+static void free_heap_memory(struct ion_heap *heap)
+{
+	struct device *dev = heap->priv;
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
+
+	/* release memory */
+	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
+				cp_heap->handle);
+	gen_pool_destroy(cp_heap->pool);
+	cp_heap->pool = NULL;
+	cp_heap->cpu_addr = 0;
+}
+
+
+
+/**
+ * Get the total number of kernel mappings.
+ * Must be called with heap->lock locked.
+ */
 static unsigned long ion_cp_get_total_kmap_count(
 					const struct ion_cp_heap *cp_heap)
 {
 	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
 }
 
+static int ion_on_first_alloc(struct ion_heap *heap)
+{
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
+	int ret_value;
+
+	if (cp_heap->reusable) {
+		ret_value = fmem_set_state(FMEM_C_STATE);
+		if (ret_value)
+			return 1;
+	}
+
+	if (cp_heap->cma) {
+		ret_value = allocate_heap_memory(heap);
+		if (ret_value)
+			return 1;
+	}
+	return 0;
+}
+
+static void ion_on_last_free(struct ion_heap *heap)
+{
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
+
+	if (cp_heap->reusable)
+		if (fmem_set_state(FMEM_T_STATE) != 0)
+			pr_err("%s: unable to transition heap to T-state\n",
+				__func__);
+
+	if (cp_heap->cma)
+		free_heap_memory(heap);
+}
+
+/**
+ * Protects memory if the heap is an unsecured heap. Also ensures that we are in
+ * the correct FMEM state if this heap is a reusable heap.
+ * Must be called with heap->lock locked.
+ */
 static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
 {
 	struct ion_cp_heap *cp_heap =
@@ -94,12 +242,10 @@
 	int ret_value = 0;
 
 	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
-		
-		if (cp_heap->reusable && !cp_heap->allocated_bytes) {
-			ret_value = fmem_set_state(FMEM_C_STATE);
-			if (ret_value)
+		/* Make sure we are in C state when the heap is protected. */
+		if (!cp_heap->allocated_bytes)
+			if (ion_on_first_alloc(heap))
 				goto out;
-		}
 
 		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
 				cp_heap->secure_size, cp_heap->permission_type,
@@ -108,11 +254,9 @@
 			pr_err("Failed to protect memory for heap %s - "
 				"error code: %d\n", heap->name, ret_value);
 
-			if (cp_heap->reusable && !cp_heap->allocated_bytes) {
-				if (fmem_set_state(FMEM_T_STATE) != 0)
-					pr_err("%s: unable to transition heap to T-state\n",
-						__func__);
-			}
+			if (!cp_heap->allocated_bytes)
+				ion_on_last_free(heap);
+
 			atomic_dec(&cp_heap->protect_cnt);
 		} else {
 			cp_heap->heap_protected = HEAP_PROTECTED;
@@ -127,6 +271,11 @@
 	return ret_value;
 }
 
+/**
+ * Unprotects memory if the heap is a secured heap. Also ensures that we are in
+ * the correct FMEM state if this heap is a reusable heap.
+ * Must be called with heap->lock locked.
+ */
 static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
 {
 	struct ion_cp_heap *cp_heap =
@@ -144,11 +293,8 @@
 			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
 				(unsigned int) cp_heap->base);
 
-			if (cp_heap->reusable && !cp_heap->allocated_bytes) {
-				if (fmem_set_state(FMEM_T_STATE) != 0)
-					pr_err("%s: unable to transition heap to T-state",
-						__func__);
-			}
+			if (!cp_heap->allocated_bytes)
+				ion_on_last_free(heap);
 		}
 	}
 	pr_debug("%s: protect count is %d\n", __func__,
@@ -163,6 +309,7 @@
 {
 	unsigned long offset;
 	unsigned long secure_allocation = flags & ION_SECURE;
+	unsigned long force_contig = flags & ION_FORCE_CONTIGUOUS;
 
 	struct ion_cp_heap *cp_heap =
 		container_of(heap, struct ion_cp_heap, heap);
@@ -175,6 +322,14 @@
 		return ION_CP_ALLOCATE_FAIL;
 	}
 
+	if (!force_contig && !secure_allocation &&
+	     cp_heap->disallow_non_secure_allocation) {
+		mutex_unlock(&cp_heap->lock);
+		pr_debug("%s: non-secure allocation disallowed from this heap\n",
+			__func__);
+		return ION_CP_ALLOCATE_FAIL;
+	}
+
 	if (secure_allocation &&
 	    (cp_heap->umap_count > 0 || cp_heap->kmap_cached_count > 0)) {
 		mutex_unlock(&cp_heap->lock);
@@ -185,12 +340,15 @@
 		return ION_CP_ALLOCATE_FAIL;
 	}
 
-	if (cp_heap->reusable && !cp_heap->allocated_bytes) {
-		if (fmem_set_state(FMEM_C_STATE) != 0) {
+	/*
+	 * if this is the first allocation, bring up the heap: move a
+	 * reusable (fmem) heap to C state or allocate the CMA backing
+	 */
+	if (!cp_heap->allocated_bytes)
+		if (ion_on_first_alloc(heap)) {
 			mutex_unlock(&cp_heap->lock);
 			return ION_RESERVED_ALLOCATE_FAIL;
 		}
-	}
 
 	cp_heap->allocated_bytes += size;
 	mutex_unlock(&cp_heap->lock);
@@ -209,13 +367,9 @@
 				__func__, heap->name,
 				cp_heap->total_size -
 				cp_heap->allocated_bytes, size);
-
-		if (cp_heap->reusable && !cp_heap->allocated_bytes &&
-		    cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
-			if (fmem_set_state(FMEM_T_STATE) != 0)
-				pr_err("%s: unable to transition heap to T-state\n",
-					__func__);
-		}
+		if (!cp_heap->allocated_bytes &&
+			cp_heap->heap_protected == HEAP_NOT_PROTECTED)
+			ion_on_last_free(heap);
 		mutex_unlock(&cp_heap->lock);
 
 		return ION_CP_ALLOCATE_FAIL;
@@ -260,14 +414,11 @@
 	mutex_lock(&cp_heap->lock);
 	cp_heap->allocated_bytes -= size;
 
-	if (cp_heap->reusable && !cp_heap->allocated_bytes &&
-	    cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
-		if (fmem_set_state(FMEM_T_STATE) != 0)
-			pr_err("%s: unable to transition heap to T-state\n",
-				__func__);
-	}
+	if (!cp_heap->allocated_bytes &&
+		cp_heap->heap_protected == HEAP_NOT_PROTECTED)
+		ion_on_last_free(heap);
 
-	
+	/* Unmap everything if we previously mapped the whole heap at once. */
 	if (!cp_heap->allocated_bytes) {
 		unsigned int i;
 		for (i = 0; i < MAX_DOMAINS; ++i) {
@@ -353,21 +504,29 @@
 	buffer->sg_table = 0;
 }
 
+/**
+ * Call request region for SMI memory if this is the first mapping.
+ */
 static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
 {
 	int ret_value = 0;
 	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
-		if (cp_heap->request_region)
-			ret_value = cp_heap->request_region(cp_heap->bus_id);
+		if (cp_heap->heap_request_region)
+			ret_value = cp_heap->heap_request_region(
+					cp_heap->bus_id);
 	return ret_value;
 }
 
+/**
+ * Call release region for SMI memory if this is the last un-mapping.
+ */
 static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
 {
 	int ret_value = 0;
 	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
-		if (cp_heap->release_region)
-			ret_value = cp_heap->release_region(cp_heap->bus_id);
+		if (cp_heap->heap_release_region)
+			ret_value = cp_heap->heap_release_region(
+					cp_heap->bus_id);
 	return ret_value;
 }
 
@@ -412,7 +571,24 @@
 		if (cp_heap->reusable) {
 			ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
 				cp_heap->reserved_vrange, buffer->flags);
+		} else if (cp_heap->cma) {
+			int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+			struct page **pages = vmalloc(
+						sizeof(struct page *) * npages);
+			int i;
+			pgprot_t pgprot;
 
+			if (ION_IS_CACHED(buffer->flags))
+				pgprot = PAGE_KERNEL;
+			else
+				pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+			for (i = 0; i < npages; i++) {
+				pages[i] = phys_to_page(buffer->priv_phys +
+						i * PAGE_SIZE);
+			}
+			ret_value = vmap(pages, npages, VM_IOREMAP, pgprot);
+			vfree(pages);
 		} else {
 			if (ION_IS_CACHED(buffer->flags))
 				ret_value = ioremap_cached(buffer->priv_phys,
@@ -443,6 +619,8 @@
 
 	if (cp_heap->reusable)
 		unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
+	else if (cp_heap->cma)
+		vunmap(buffer->vaddr);
 	else
 		__arm_iounmap(buffer->vaddr);
 
@@ -569,19 +747,18 @@
 		struct rb_node *n;
 
 		seq_printf(s, "\nMemory Map\n");
-		seq_printf(s, "%16.s %16.s %14.s %14.s %14.s\n",
-			   "client", "creator", "start address", "end address",
+		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+			   "client", "start address", "end address",
 			   "size (hex)");
 
 		for (n = rb_first(mem_map); n; n = rb_next(n)) {
 			struct mem_map_data *data =
 					rb_entry(n, struct mem_map_data, node);
 			const char *client_name = "(null)";
-			const char *creator_name = "(null)";
 
 			if (last_end < data->addr) {
-				seq_printf(s, "%16.s %16.s %14lx %14lx %14lu (%lx)\n",
-					   "FREE", "NA", last_end, data->addr-1,
+				seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
+					   "FREE", last_end, data->addr-1,
 					   data->addr-last_end,
 					   data->addr-last_end);
 			}
@@ -589,17 +766,14 @@
 			if (data->client_name)
 				client_name = data->client_name;
 
-			if (data->creator_name)
-				creator_name = data->creator_name;
-
-			seq_printf(s, "%16.s %16.s %14lx %14lx %14lu (%lx)\n",
-				   client_name, creator_name, data->addr,
+			seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
+				   client_name, data->addr,
 				   data->addr_end,
 				   data->size, data->size);
 			last_end = data->addr_end+1;
 		}
 		if (last_end < end) {
-			seq_printf(s, "%16.s %16.s %14lx %14lx %14lu (%lx)\n", "FREE", "NA",
+			seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
 				last_end, end-1, end-last_end, end-last_end);
 		}
 	}
@@ -646,6 +820,9 @@
 	unsigned long virt_addr_len = cp_heap->total_size;
 	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
 
+	/* If we are mapping into the video domain we need to map twice the
+	 * size of the heap to account for the prefetch issue in the video core.
+	 */
 	if (domain_num == cp_heap->iommu_2x_map_domain)
 		virt_addr_len <<= 1;
 
@@ -728,7 +905,7 @@
 	}
 
 	if (cp_heap->iommu_iova[domain_num]) {
-		
+		/* Already mapped. */
 		unsigned long offset = buffer->priv_phys - cp_heap->base;
 		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
 		return 0;
@@ -740,6 +917,10 @@
 			data->iova_addr =
 				cp_heap->iommu_iova[domain_num] + offset;
 			cp_heap->iommu_partition[domain_num] = partition_num;
+			/*
+			 * clear delayed map flag so that we don't interfere
+			 * with this feature (we are already delaying).
+			 */
 			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
 			return 0;
 		} else {
@@ -805,6 +986,8 @@
 
 	domain_num = iommu_map_domain(data);
 
+	/* If we are mapping everything we'll wait to unmap until everything
+	   is freed. */
 	if (cp_heap->iommu_iova[domain_num])
 		return;
 
@@ -853,14 +1036,6 @@
 
 	mutex_init(&cp_heap->lock);
 
-	cp_heap->pool = gen_pool_create(12, -1);
-	if (!cp_heap->pool)
-		goto free_heap;
-
-	cp_heap->base = heap_data->base;
-	ret = gen_pool_add(cp_heap->pool, cp_heap->base, heap_data->size, -1);
-	if (ret < 0)
-		goto destroy_pool;
 
 	cp_heap->allocated_bytes = 0;
 	cp_heap->umap_count = 0;
@@ -868,11 +1043,13 @@
 	cp_heap->kmap_uncached_count = 0;
 	cp_heap->total_size = heap_data->size;
 	cp_heap->heap.ops = &cp_heap_ops;
-	cp_heap->heap.type = ION_HEAP_TYPE_CP;
+	cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP;
 	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
-	cp_heap->secure_base = cp_heap->base;
+	cp_heap->secure_base = heap_data->base;
 	cp_heap->secure_size = heap_data->size;
 	cp_heap->has_outer_cache = heap_data->has_outer_cache;
+	cp_heap->heap_size = heap_data->size;
+
 	atomic_set(&cp_heap->protect_cnt, 0);
 	if (heap_data->extra_data) {
 		struct ion_cp_heap_pdata *extra_data =
@@ -887,16 +1064,37 @@
 		if (extra_data->setup_region)
 			cp_heap->bus_id = extra_data->setup_region();
 		if (extra_data->request_region)
-			cp_heap->request_region = extra_data->request_region;
+			cp_heap->heap_request_region =
+				extra_data->request_region;
 		if (extra_data->release_region)
-			cp_heap->release_region = extra_data->release_region;
+			cp_heap->heap_release_region =
+				extra_data->release_region;
 		cp_heap->iommu_map_all =
 				extra_data->iommu_map_all;
 		cp_heap->iommu_2x_map_domain =
 				extra_data->iommu_2x_map_domain;
+		cp_heap->cma = extra_data->is_cma;
+		cp_heap->disallow_non_secure_allocation =
+			extra_data->no_nonsecure_alloc;
 
 	}
 
+	if (cp_heap->cma) {
+		cp_heap->pool = NULL;
+		cp_heap->cpu_addr = 0;
+		cp_heap->heap.priv = heap_data->priv;
+	} else {
+		cp_heap->pool = gen_pool_create(12, -1);
+		if (!cp_heap->pool)
+			goto free_heap;
+
+		cp_heap->base = heap_data->base;
+		ret = gen_pool_add(cp_heap->pool, cp_heap->base,
+					heap_data->size, -1);
+		if (ret < 0)
+			goto destroy_pool;
+
+	}
 	return &cp_heap->heap;
 
 destroy_pool:
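
For the CMA-backed mode added above, a platform-data sketch (illustrative;
only is_cma and no_nonsecure_alloc are fields introduced by this patch, the
remaining values are board-specific assumptions):

	static struct ion_cp_heap_pdata cp_mm_ion_pdata = {
		.permission_type = IPT_TYPE_MM_CARVEOUT,
		.align = PAGE_SIZE,
		.is_cma = 1,		 /* back the pool with dma_alloc_attrs() */
		.no_nonsecure_alloc = 1, /* reject plain (non-secure, non-contiguous) allocations */
	};
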
@@ -927,6 +1125,7 @@
 	*size = cp_heap->total_size;
 }
 
+/*  SCM related code for locking down memory for content protection */
 
 #define SCM_CP_LOCK_CMD_ID	0x1
 #define SCM_CP_PROTECT		0x1
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 6ea49db..98c1a8c 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -18,12 +18,13 @@
 #include <linux/err.h>
 #include <linux/ion.h>
 #include "ion_priv.h"
+#include <linux/msm_ion.h>
 
 struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
 {
 	struct ion_heap *heap = NULL;
 
-	switch (heap_data->type) {
+	switch ((int) heap_data->type) {
 	case ION_HEAP_TYPE_SYSTEM_CONTIG:
 		heap = ion_system_contig_heap_create(heap_data);
 		break;
@@ -39,6 +40,11 @@
 	case ION_HEAP_TYPE_CP:
 		heap = ion_cp_heap_create(heap_data);
 		break;
+#ifdef CONFIG_CMA
+	case ION_HEAP_TYPE_DMA:
+		heap = ion_cma_heap_create(heap_data);
+		break;
+#endif
 	default:
 		pr_err("%s: Invalid heap type %d\n", __func__,
 		       heap_data->type);
@@ -54,6 +60,7 @@
 
 	heap->name = heap_data->name;
 	heap->id = heap_data->id;
+	heap->priv = heap_data->priv;
 	return heap;
 }
 
@@ -62,7 +69,7 @@
 	if (!heap)
 		return;
 
-	switch (heap->type) {
+	switch ((int) heap->type) {
 	case ION_HEAP_TYPE_SYSTEM_CONTIG:
 		ion_system_contig_heap_destroy(heap);
 		break;
@@ -78,6 +85,11 @@
 	case ION_HEAP_TYPE_CP:
 		ion_cp_heap_destroy(heap);
 		break;
+#ifdef CONFIG_CMA
+	case ION_HEAP_TYPE_DMA:
+		ion_cma_heap_destroy(heap);
+		break;
+#endif
 	default:
 		pr_err("%s: Invalid heap type %d\n", __func__,
 		       heap->type);
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 5f6780b..3a32390 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,13 +12,15 @@
  */
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/ion.h>
+#include <linux/msm_ion.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/iommu.h>
 #include <linux/pfn.h>
+#include <linux/dma-mapping.h>
 #include "ion_priv.h"
 
 #include <asm/mach/map.h>
@@ -31,40 +33,107 @@
 	unsigned int has_outer_cache;
 };
 
+/*
+ * We will attempt to allocate high-order pages and store those in an
+ * sg_list. However, some APIs expect an array of struct page * where
+ * each page is of size PAGE_SIZE. We use this extra structure to
+ * carry around an array of such pages (derived from the high-order
+ * pages with nth_page).
+ */
 struct ion_iommu_priv_data {
 	struct page **pages;
 	int nrpages;
 	unsigned long size;
 };
 
+#define MAX_VMAP_RETRIES 10
+
 atomic_t v = ATOMIC_INIT(0);
 
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
+
+struct page_info {
+	struct page *page;
+	unsigned int order;
+	struct list_head list;
+};
+
+static unsigned int order_to_size(int order)
+{
+	return PAGE_SIZE << order;
+}
+
+static struct page_info *alloc_largest_available(unsigned long size,
+						unsigned int max_order)
+{
+	struct page *page;
+	struct page_info *info;
+	int i;
+
+	for (i = 0; i < num_orders; i++) {
+		if (size < order_to_size(orders[i]))
+			continue;
+		if (max_order < orders[i])
+			continue;
+
+		page = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM | __GFP_COMP,
+				orders[i]);
+		if (!page)
+			continue;
+
+		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
+		info->page = page;
+		info->order = orders[i];
+		return info;
+	}
+	return NULL;
+}
+
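+/*
+ * Worked example (illustrative note, not in the original change): with
+ * orders[] = {8, 4, 0} the candidate chunk sizes are 1 MiB, 64 KiB and
+ * 4 KiB, so a 1 MiB + 64 KiB request is satisfied by one order-8 page
+ * followed by one order-4 page. Because max_order drops to the order of
+ * each chunk actually obtained, a failed order-8 allocation is never
+ * retried for the remainder of the buffer.
+ */
+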
 static int ion_iommu_heap_allocate(struct ion_heap *heap,
 				      struct ion_buffer *buffer,
 				      unsigned long size, unsigned long align,
 				      unsigned long flags)
 {
-	int ret = 0, i;
+	int ret, i;
+	struct list_head pages_list;
+	struct page_info *info, *tmp_info;
 	struct ion_iommu_priv_data *data = NULL;
-	pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
-	void *ptr = NULL;
 
 	if (msm_use_iommu()) {
 		struct scatterlist *sg;
 		struct sg_table *table;
-		unsigned int i;
+		int j;
+		void *ptr = NULL;
+		unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
+		long size_remaining = PAGE_ALIGN(size);
+		unsigned int max_order = orders[0];
 
 		data = kmalloc(sizeof(*data), GFP_KERNEL);
 		if (!data)
 			return -ENOMEM;
 
+		INIT_LIST_HEAD(&pages_list);
+		while (size_remaining > 0) {
+			info = alloc_largest_available(size_remaining,
+						max_order);
+			if (!info) {
+				ret = -ENOMEM;
+				goto err_free_data;
+			}
+			list_add_tail(&info->list, &pages_list);
+			size_remaining -= order_to_size(info->order);
+			max_order = info->order;
+			num_large_pages++;
+		}
+
 		data->size = PFN_ALIGN(size);
 		data->nrpages = data->size >> PAGE_SHIFT;
 		data->pages = kzalloc(sizeof(struct page *)*data->nrpages,
 				GFP_KERNEL);
 		if (!data->pages) {
 			ret = -ENOMEM;
-			goto err1;
+			goto err_free_data;
 		}
 
 		table = buffer->sg_table =
@@ -74,30 +143,65 @@
 			ret = -ENOMEM;
 			goto err1;
 		}
-		ret = sg_alloc_table(table, data->nrpages, GFP_KERNEL);
+		ret = sg_alloc_table(table, num_large_pages, GFP_KERNEL);
 		if (ret)
 			goto err2;
 
-		for_each_sg(table->sgl, sg, table->nents, i) {
-			data->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
-			if (!data->pages[i])
-				goto err3;
-
-			sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
+		i = 0;
+		sg = table->sgl;
+		list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
+			struct page *page = info->page;
+			sg_set_page(sg, page, order_to_size(info->order), 0);
+			sg_dma_address(sg) = sg_phys(sg);
+			sg = sg_next(sg);
+			for (j = 0; j < (1 << info->order); ++j)
+				data->pages[i++] = nth_page(page, j);
+			list_del(&info->list);
+			kfree(info);
 		}
 
-		ptr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);
-		if (ptr != NULL) {
-			memset(ptr, 0, data->size);
-			dmac_flush_range(ptr, ptr + data->size);
+		/*
+		 * As an optimization, we omit __GFP_ZERO from
+		 * alloc_pages() above and manually zero out all of the
+		 * pages in one fell swoop here. To safeguard against
+		 * insufficient vmalloc space, we only vmap
+		 * `npages_to_vmap' at a time, starting with a
+		 * conservative estimate of 1/8 of the total number of
+		 * vmalloc pages available. Note that the `pages'
+		 * array is composed of all 4K pages, irrespective of
+		 * the size of the pages on the sg list.
+		 */
+		npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
+			>> PAGE_SHIFT;
+		total_pages = data->nrpages;
+		for (i = 0; i < total_pages; i += npages_to_vmap) {
+			npages_to_vmap = min(npages_to_vmap, total_pages - i);
+			for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
+			     ++j) {
+				ptr = vmap(&data->pages[i], npages_to_vmap,
+					VM_IOREMAP, pgprot_kernel);
+				if (ptr)
+					break;
+				else
+					npages_to_vmap >>= 1;
+			}
+			if (!ptr) {
+				pr_err("Couldn't vmap the pages for zeroing\n");
+				ret = -ENOMEM;
+				goto err3;
+			}
+			memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
 			vunmap(ptr);
-		} else
-			pr_err("%s: vmap() failed\n", __func__);
+		}
+
+		if (!ION_IS_CACHED(flags))
+			dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+						DMA_BIDIRECTIONAL);
 
 		buffer->priv_virt = data;
-		
+
 		atomic_add(data->size, &v);
-		
+
 		return 0;
 
 	} else {
@@ -110,31 +214,40 @@
 err2:
 	kfree(buffer->sg_table);
 	buffer->sg_table = 0;
-
-	for (i = 0; i < data->nrpages; i++) {
-		if (data->pages[i])
-			__free_page(data->pages[i]);
-	}
-	kfree(data->pages);
 err1:
+	kfree(data->pages);
+err_free_data:
 	kfree(data);
+
+	list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
+		if (info->page)
+			__free_pages(info->page, info->order);
+		list_del(&info->list);
+		kfree(info);
+	}
 	return ret;
 }
 
 static void ion_iommu_heap_free(struct ion_buffer *buffer)
 {
-	struct ion_iommu_priv_data *data = buffer->priv_virt;
 	int i;
+	struct scatterlist *sg;
+	struct sg_table *table = buffer->sg_table;
+	struct ion_iommu_priv_data *data = buffer->priv_virt;
 
+	if (!table)
+		return;
 	if (!data)
 		return;
 
-	for (i = 0; i < data->nrpages; i++)
-		__free_page(data->pages[i]);
+	for_each_sg(table->sgl, sg, table->nents, i)
+		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
 
-	
+	sg_free_table(table);
+	kfree(table);
+	buffer->sg_table = NULL;
+
 	atomic_sub(data->size, &v);
-	
 
 	kfree(data->pages);
 	kfree(data);
@@ -146,38 +259,6 @@
 	return ret;
 }
 
-static int ion_iommu_print_debug(struct ion_heap *heap, struct seq_file *s,
-				    const struct rb_root *mem_map)
-{
-	seq_printf(s, "Total bytes currently allocated: %d (%x)\n",
-		atomic_read(&v), atomic_read(&v));
-
-	if (mem_map) {
-		struct rb_node *n;
-
-		seq_printf(s, "\nBuffer Info\n");
-		seq_printf(s, "%16.s %16.s %14.s\n",
-			   "client", "creator", "size (hex)");
-
-		for (n = rb_first(mem_map); n; n = rb_next(n)) {
-			struct mem_map_data *data =
-					rb_entry(n, struct mem_map_data, node);
-			const char *client_name = "(null)";
-			const char *creator_name = "(null)";
-
-			if (data->client_name)
-				client_name = data->client_name;
-
-			if (data->creator_name)
-				creator_name = data->creator_name;
-
-			seq_printf(s, "%16.s %16.s %14lu (%lx)\n",
-				   client_name, creator_name, data->size, data->size);
-		}
-	}
-	return 0;
-}
-
 void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
 				struct ion_buffer *buffer)
 {
@@ -208,21 +289,34 @@
 int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
 			       struct vm_area_struct *vma)
 {
-	struct ion_iommu_priv_data *data = buffer->priv_virt;
+	struct sg_table *table = buffer->sg_table;
+	unsigned long addr = vma->vm_start;
+	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+	struct scatterlist *sg;
 	int i;
-	unsigned long curr_addr;
-	if (!data)
-		return -EINVAL;
 
 	if (!ION_IS_CACHED(buffer->flags))
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
-	curr_addr = vma->vm_start;
-	for (i = 0; i < data->nrpages && curr_addr < vma->vm_end; i++) {
-		if (vm_insert_page(vma, curr_addr, data->pages[i])) {
-			return -EINVAL;
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		struct page *page = sg_page(sg);
+		unsigned long remainder = vma->vm_end - addr;
+		unsigned long len = sg_dma_len(sg);
+
+		if (offset >= sg_dma_len(sg)) {
+			offset -= sg_dma_len(sg);
+			continue;
+		} else if (offset) {
+			page += offset / PAGE_SIZE;
+			len = sg_dma_len(sg) - offset;
+			offset = 0;
 		}
-		curr_addr += PAGE_SIZE;
+		len = min(len, remainder);
+		if (remap_pfn_range(vma, addr, page_to_pfn(page), len,
+				vma->vm_page_prot))
+			return -EAGAIN;
+		addr += len;
+		if (addr >= vma->vm_end)
+			return 0;
 	}
 	return 0;
 }
@@ -363,10 +457,6 @@
 static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
 				 struct ion_buffer *buffer)
 {
-	if (buffer->sg_table)
-		sg_free_table(buffer->sg_table);
-	kfree(buffer->sg_table);
-	buffer->sg_table = 0;
 }
 
 static struct ion_heap_ops iommu_heap_ops = {
@@ -380,7 +470,6 @@
 	.cache_op = ion_iommu_cache_ops,
 	.map_dma = ion_iommu_heap_map_dma,
 	.unmap_dma = ion_iommu_heap_unmap_dma,
-	.print_debug = ion_iommu_print_debug,
 };
 
 struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 31a0d81..d494f7a 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion_priv.h
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -32,6 +32,23 @@
 	DI_MAX,
 };
 
+/**
+ * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
+ * @iova_addr - iommu virtual address
+ * @node - rb node to exist in the buffer's tree of iommu mappings
+ * @domain_info - contains the partition number and domain number
+ *		domain_info[1] = domain number
+ *		domain_info[0] = partition number
+ * @ref - for reference counting this mapping
+ * @mapped_size - size of the iova space mapped
+ *		(may not be the same as the buffer size)
+ * @flags - iommu domain/partition specific flags.
+ *
+ * Represents a mapping of one ion buffer to a particular iommu domain
+ * and address range. There may exist other mappings of this buffer in
+ * different domains or address ranges. All mappings will have the same
+ * cacheability and security.
+ */
 struct ion_iommu_map {
 	unsigned long iova_addr;
 	struct rb_node node;
@@ -47,12 +64,29 @@
 
 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
 
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref:		reference count
+ * @node:		node in the ion_device buffers tree
+ * @dev:		back pointer to the ion_device
+ * @heap:		back pointer to the heap the buffer came from
+ * @flags:		buffer specific flags
+ * @size:		size of the buffer
+ * @priv_virt:		private data to the buffer representable as
+ *			a void *
+ * @priv_phys:		private data to the buffer representable as
+ *			an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock:		protects the buffers cnt fields
+ * @kmap_cnt:		number of times the buffer is mapped to the kernel
+ * @vaddr:		the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt:		number of times the buffer is mapped for dma
+ * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
+ */
 struct ion_buffer {
 	struct kref ref;
 	struct rb_node node;
 	struct ion_device *dev;
 	struct ion_heap *heap;
-	struct ion_client *creator;
 	unsigned long flags;
 	size_t size;
 	union {
@@ -70,6 +104,19 @@
 	int marked;
 };
 
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate:		allocate memory
+ * @free:		free memory
+ * @phys		get physical address of a buffer (only defined on
+ *			physically contiguous heaps)
+ * @map_dma		map the memory for dma to a scatterlist
+ * @unmap_dma		unmap the memory for dma
+ * @map_kernel		map memory to the kernel
+ * @unmap_kernel	unmap memory from the kernel
+ * @map_user		map memory to userspace
+ * @unmap_user		unmap memory from userspace
+ */
 struct ion_heap_ops {
 	int (*allocate) (struct ion_heap *heap,
 			 struct ion_buffer *buffer, unsigned long len,
@@ -102,6 +149,23 @@
 	int (*unsecure_heap)(struct ion_heap *heap, int version, void *data);
 };
 
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node:		rb node to put the heap on the device's tree of heaps
+ * @dev:		back pointer to the ion_device
+ * @type:		type of heap
+ * @ops:		ops struct as above
+ * @id:			id of heap, also indicates priority of this heap when
+ *			allocating.  These are specified by platform data and
+ *			MUST be unique
+ * @name:		used for debugging
+ * @priv:		private heap data
+ *
+ * Represents a pool of memory from which buffers can be made.  In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
 struct ion_heap {
 	struct rb_node node;
 	struct ion_device *dev;
@@ -109,29 +173,58 @@
 	struct ion_heap_ops *ops;
 	int id;
 	const char *name;
+	void *priv;
 };
 
+/**
+ * struct mem_map_data - represents information about the memory map for a heap
+ * @node:		rb node used to store in the tree of mem_map_data
+ * @addr:		start address of memory region.
+ * @addr_end:		end address of memory region.
+ * @size:		size of memory region.
+ * @client_name:	name of the client who owns this buffer.
+ *
+ */
 struct mem_map_data {
 	struct rb_node node;
 	unsigned long addr;
 	unsigned long addr_end;
 	unsigned long size;
 	const char *client_name;
-	const char *creator_name;
 };
 
 #define iommu_map_domain(__m)		((__m)->domain_info[1])
 #define iommu_map_partition(__m)	((__m)->domain_info[0])
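Because the domain_info encoding documented above is easy to get backwards, a small illustrative helper (not part of this patch) shows the accessors in use:

/* illustrative only: compare two mappings using the accessors above */
static bool same_iommu_target(struct ion_iommu_map *a,
			      struct ion_iommu_map *b)
{
	return iommu_map_domain(a) == iommu_map_domain(b) &&
	       iommu_map_partition(a) == iommu_map_partition(b);
}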
 
+/**
+ * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl:	arch specific ioctl function if applicable
+ *
+ * returns a valid device on success, or an ERR_PTR() encoded error on failure
+ */
 struct ion_device *ion_device_create(long (*custom_ioctl)
 				     (struct ion_client *client,
 				      unsigned int cmd,
 				      unsigned long arg));
 
+/**
+ * ion_device_destroy - free a device and its resources
+ * @dev:		the device
+ */
 void ion_device_destroy(struct ion_device *dev);
 
+/**
+ * ion_device_add_heap - adds a heap to the ion device
+ * @dev:		the device
+ * @heap:		the heap to add
+ */
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
 
+/**
+ * functions for creating and destroying the built-in ion heaps.
+ * architectures can add their own custom architecture-specific
+ * heaps as appropriate.
+ */
 
 struct ion_heap *ion_heap_create(struct ion_platform_heap *);
 void ion_heap_destroy(struct ion_heap *);
@@ -154,21 +247,63 @@
 struct ion_heap *ion_reusable_heap_create(struct ion_platform_heap *);
 void ion_reusable_heap_destroy(struct ion_heap *);
 
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
 ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
 				      unsigned long align);
 void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
 		       unsigned long size);
 
+#ifdef CONFIG_CMA
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
+void ion_cma_heap_destroy(struct ion_heap *);
+#endif
 
 struct ion_heap *msm_get_contiguous_heap(void);
+/**
+ * The carveout/cp heap returns physical addresses, since 0 may be a valid
+ * physical address, this is used to indicate allocation failed
+ */
 #define ION_CARVEOUT_ALLOCATE_FAIL -1
 #define ION_CP_ALLOCATE_FAIL -1
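To show how the sentinel above is meant to be consumed, here is a hedged sketch of a custom heap's allocate hook built on the carveout helpers; the trailing align/flags parameters are assumed to complete the ion_heap_ops::allocate signature shown earlier:

static int example_carveout_allocate(struct ion_heap *heap,
				     struct ion_buffer *buffer,
				     unsigned long len, unsigned long align,
				     unsigned long flags)
{
	/* 0 can be a valid physical address, so check the sentinel */
	buffer->priv_phys = ion_carveout_allocate(heap, len, align);
	if (buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL)
		return -ENOMEM;
	return 0;
}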
 
+/**
+ * The reserved heap returns physical addresses, since 0 may be a valid
+ * physical address, this is used to indicate allocation failed
+ */
 #define ION_RESERVED_ALLOCATE_FAIL -1
 
+/**
+ * ion_map_fmem_buffer - map fmem allocated memory into the kernel
+ * @buffer - buffer to map
+ * @phys_base - physical base of the heap
+ * @virt_base - virtual base of the heap
+ * @flags - flags for the heap
+ *
+ * Map fmem allocated memory into the kernel address space. This
+ * is designed to be used by other heaps that need fmem behavior.
+ * The virtual range must be pre-allocated.
+ */
 void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
 				void *virt_base, unsigned long flags);
 
+/**
+ * ion_do_cache_op - do cache operations.
+ *
+ * @client - pointer to ION client.
+ * @handle - pointer to buffer handle.
+ * @uaddr -  virtual address to operate on.
+ * @offset - offset from physical address.
+ * @len - Length of data to do cache operation on.
+ * @cmd - Cache operation to perform:
+ *		ION_IOC_CLEAN_CACHES
+ *		ION_IOC_INV_CACHES
+ *		ION_IOC_CLEAN_INV_CACHES
+ *
+ * Returns 0 on success
+ */
 int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
 			void *uaddr, unsigned long offset, unsigned long len,
 			unsigned int cmd);
@@ -178,4 +313,4 @@
 
 void ion_mem_map_show(struct ion_heap *heap);
 
-#endif 
+#endif /* _ION_PRIV_H */
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index 299c24c..980174e 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion_system_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -27,6 +27,7 @@
 #include "ion_priv.h"
 #include <mach/memory.h>
 #include <asm/cacheflush.h>
+#include <linux/msm_ion.h>
 
 static atomic_t system_heap_allocated;
 static atomic_t system_contig_heap_allocated;
@@ -206,6 +207,10 @@
 		for_each_sg(table->sgl, sg, table->nents, i) {
 			struct page *page = sg_page(sg);
 			pstart = page_to_phys(page);
+			/*
+			 * If page -> phys is returning NULL, something
+			 * has really gone wrong...
+			 */
 			if (!pstart) {
 				WARN(1, "Could not translate virtual address to physical address\n");
 				return -EINVAL;
@@ -478,7 +483,7 @@
 	}
 	page = virt_to_page(buffer->vaddr);
 
-	sglist = vmalloc(sizeof(*sglist));
+	sglist = kmalloc(sizeof(*sglist), GFP_KERNEL);
 	if (!sglist)
 		goto out1;
 
@@ -500,13 +505,13 @@
 		if (ret)
 			goto out2;
 	}
-	vfree(sglist);
+	kfree(sglist);
 	return ret;
 out2:
 	iommu_unmap_range(domain, data->iova_addr, buffer->size);
 
 out1:
-	vfree(sglist);
+	kfree(sglist);
 	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
 						data->mapped_size);
 out:
diff --git a/drivers/gpu/ion/ion_system_mapper.c b/drivers/gpu/ion/ion_system_mapper.c
new file mode 100644
index 0000000..692458e
--- /dev/null
+++ b/drivers/gpu/ion/ion_system_mapper.c
@@ -0,0 +1,114 @@
+/*
+ * drivers/gpu/ion/ion_system_mapper.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion_priv.h"
+/*
+ * This mapper is valid for any heap that allocates memory that already has
+ * a kernel mapping; this includes vmalloc'd memory, kmalloc'd memory,
+ * pages obtained via io_remap, etc.
+ */
+static void *ion_kernel_mapper_map(struct ion_mapper *mapper,
+				   struct ion_buffer *buffer,
+				   struct ion_mapping **mapping)
+{
+	if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
+		pr_err("%s: attempting to map an unsupported heap\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	/* XXX REVISIT ME!!! */
+	*((unsigned long *)mapping) = (unsigned long)buffer->priv;
+	return buffer->priv;
+}
+
+static void ion_kernel_mapper_unmap(struct ion_mapper *mapper,
+				    struct ion_buffer *buffer,
+				    struct ion_mapping *mapping)
+{
+	if (!((1 << buffer->heap->type) & mapper->heap_mask))
+		pr_err("%s: attempting to unmap an unsupported heap\n",
+		       __func__);
+}
+
+static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper,
+					struct ion_buffer *buffer,
+					struct ion_mapping *mapping)
+{
+	if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
+		pr_err("%s: attempting to map an unsupported heap\n",
+		       __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	return buffer->priv;
+}
+
+static int ion_kernel_mapper_map_user(struct ion_mapper *mapper,
+				      struct ion_buffer *buffer,
+				      struct vm_area_struct *vma,
+				      struct ion_mapping *mapping)
+{
+	int ret;
+
+	switch (buffer->heap->type) {
+	case ION_HEAP_KMALLOC:
+	{
+		unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv));
+		ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+				      vma->vm_end - vma->vm_start,
+				      vma->vm_page_prot);
+		break;
+	}
+	case ION_HEAP_VMALLOC:
+		ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff);
+		break;
+	default:
+		pr_err("%s: attempting to map unsupported heap to userspace\n",
+		       __func__);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static struct ion_mapper_ops ops = {
+	.map = ion_kernel_mapper_map,
+	.map_kernel = ion_kernel_mapper_map_kernel,
+	.map_user = ion_kernel_mapper_map_user,
+	.unmap = ion_kernel_mapper_unmap,
+};
+
+struct ion_mapper *ion_system_mapper_create(void)
+{
+	struct ion_mapper *mapper;
+	mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL);
+	if (!mapper)
+		return ERR_PTR(-ENOMEM);
+	mapper->type = ION_SYSTEM_MAPPER;
+	mapper->ops = &ops;
+	mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC);
+	return mapper;
+}
+
+void ion_system_mapper_destroy(struct ion_mapper *mapper)
+{
+	kfree(mapper);
+}
+
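A short, hypothetical usage sketch of this legacy mapper interface; nothing in this patch actually wires the mapper into the ion core, so treat it as illustrative only:

static int example_use_system_mapper(void)
{
	struct ion_mapper *mapper = ion_system_mapper_create();

	if (IS_ERR(mapper))
		return PTR_ERR(mapper);
	/* heap_mask limits it to ION_HEAP_VMALLOC and ION_HEAP_KMALLOC */
	ion_system_mapper_destroy(mapper);
	return 0;
}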
diff --git a/drivers/gpu/ion/msm/ion_cp_common.c b/drivers/gpu/ion/msm/ion_cp_common.c
index b274ba2..41e0a04 100644
--- a/drivers/gpu/ion/msm/ion_cp_common.c
+++ b/drivers/gpu/ion/msm/ion_cp_common.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2011 Google, Inc
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
diff --git a/drivers/gpu/ion/msm/ion_cp_common.h b/drivers/gpu/ion/msm/ion_cp_common.h
index 950966d..eec66e6 100644
--- a/drivers/gpu/ion/msm/ion_cp_common.h
+++ b/drivers/gpu/ion/msm/ion_cp_common.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,12 +15,24 @@
 #define ION_CP_COMMON_H
 
 #include <asm-generic/errno-base.h>
-#include <linux/ion.h>
+#include <linux/msm_ion.h>
 
 #define ION_CP_V1	1
 #define ION_CP_V2	2
 
 #if defined(CONFIG_ION_MSM)
+/*
+ * ion_cp_change_chunks_state - secures or unsecures memory chunks via trustzone
+ *
+ * @chunks - physical address of the array containing the chunks to
+ *		be locked down
+ * @nchunks - number of entries in the array
+ * @chunk_size - size of each memory chunk
+ * @usage - usage hint
+ * @lock - 1 for lock, 0 for unlock
+ *
+ * return value is the result of the scm call
+ */
 int ion_cp_change_chunks_state(unsigned long chunks, unsigned int nchunks,
 			unsigned int chunk_size, enum cp_mem_usage usage,
 			int lock);
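Since the argument convention is easy to miss (the first parameter is the physical address of the chunk array, not the array itself), here is a hedged caller sketch; the wrapper name is illustrative and not part of this patch:

/* lock down an array of equally-sized chunks via trustzone */
static int example_lock_chunks(unsigned long *chunk_list,
			       unsigned int nchunks,
			       unsigned int chunk_size,
			       enum cp_mem_usage usage)
{
	/* pass the *physical* address of the chunk-address array */
	return ion_cp_change_chunks_state(
			(unsigned long)virt_to_phys(chunk_list),
			nchunks, chunk_size, usage, 1 /* lock */);
}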
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index 03a33f3..ab5d09b 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,20 +13,92 @@
 
 #include <linux/export.h>
 #include <linux/err.h>
-#include <linux/ion.h>
+#include <linux/msm_ion.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/memory_alloc.h>
 #include <linux/fmem.h>
+#include <linux/of.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/sched.h>
+#include <linux/rwsem.h>
+#include <linux/uaccess.h>
 #include <mach/ion.h>
 #include <mach/msm_memtypes.h>
 #include "../ion_priv.h"
 #include "ion_cp_common.h"
 
+#define ION_COMPAT_STR	"qcom,msm-ion"
+#define ION_COMPAT_MEM_RESERVE_STR "qcom,msm-ion-reserve"
+
 static struct ion_device *idev;
 static int num_heaps;
 static struct ion_heap **heaps;
 
+struct ion_heap_desc {
+	unsigned int id;
+	enum ion_heap_type type;
+	const char *name;
+	unsigned int permission_type;
+};
+
+static struct ion_heap_desc ion_heap_meta[] = {
+	{
+		.id	= ION_SYSTEM_HEAP_ID,
+		.type	= ION_HEAP_TYPE_SYSTEM,
+		.name	= ION_VMALLOC_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_MM_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CP,
+		.name	= ION_MM_HEAP_NAME,
+		.permission_type = IPT_TYPE_MM_CARVEOUT,
+	},
+	{
+		.id	= ION_MM_FIRMWARE_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_MM_FIRMWARE_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_MFC_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CP,
+		.name	= ION_MFC_HEAP_NAME,
+		.permission_type = IPT_TYPE_MFC_SHAREDMEM,
+	},
+	{
+		.id	= ION_SF_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_SF_HEAP_NAME,
+	},
+	{
+		.id	= ION_IOMMU_HEAP_ID,
+		.type	= ION_HEAP_TYPE_IOMMU,
+		.name	= ION_IOMMU_HEAP_NAME,
+	},
+	{
+		.id	= ION_QSECOM_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_QSECOM_HEAP_NAME,
+	},
+	{
+		.id	= ION_AUDIO_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_AUDIO_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_WB_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CP,
+		.name	= ION_WB_HEAP_NAME,
+	},
+	{
+		.id	= ION_CAMERA_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_CAMERA_HEAP_NAME,
+	},
+};
+
 struct ion_client *msm_ion_client_create(unsigned int heap_mask,
 					const char *name)
 {
@@ -164,6 +236,15 @@
 	}
 }
 
+/* Fixup heaps in the board file to support two heaps being adjacent to each
+ * other. A flag (adjacent_mem_id) in the platform data tells us that the
+ * heap's physical memory location must be adjacent to the specified heap.
+ * We do this by carving out memory for both heaps and then splitting it
+ * between the two. The heap specifying "adjacent_mem_id" gets the base of
+ * the memory, while the heap named by "adjacent_mem_id" gets base+size as
+ * its base address.
+ * Note: Modifies platform data and allocates memory.
+ */
 static void msm_ion_heap_fixup(struct ion_platform_heap heap_data[],
 			       unsigned int nr_heaps)
 {
@@ -183,7 +264,7 @@
 
 	if (!heap->base && heap->extra_data) {
 		unsigned int align = 0;
-		switch (heap->type) {
+		switch ((int) heap->type) {
 		case ION_HEAP_TYPE_CARVEOUT:
 			align =
 			((struct ion_co_heap_pdata *) heap->extra_data)->align;
@@ -260,11 +341,339 @@
 	}
 }
 
+static int msm_init_extra_data(struct ion_platform_heap *heap,
+			       const struct ion_heap_desc *heap_desc)
+{
+	int ret = 0;
+
+	switch ((int) heap->type) {
+	case ION_HEAP_TYPE_CP:
+	{
+		heap->extra_data = kzalloc(sizeof(struct ion_cp_heap_pdata),
+					   GFP_KERNEL);
+		if (!heap->extra_data) {
+			ret = -ENOMEM;
+		} else {
+			struct ion_cp_heap_pdata *extra = heap->extra_data;
+			extra->permission_type = heap_desc->permission_type;
+		}
+		break;
+	}
+	case ION_HEAP_TYPE_CARVEOUT:
+	{
+		heap->extra_data = kzalloc(sizeof(struct ion_co_heap_pdata),
+					   GFP_KERNEL);
+		if (!heap->extra_data)
+			ret = -ENOMEM;
+		break;
+	}
+	default:
+		heap->extra_data = 0;
+		break;
+	}
+	return ret;
+}
+
+static int msm_ion_populate_heap(struct ion_platform_heap *heap)
+{
+	unsigned int i;
+	int ret = -EINVAL;
+	unsigned int len = ARRAY_SIZE(ion_heap_meta);
+	for (i = 0; i < len; ++i) {
+		if (ion_heap_meta[i].id == heap->id) {
+			heap->name = ion_heap_meta[i].name;
+			heap->type = ion_heap_meta[i].type;
+			ret = msm_init_extra_data(heap, &ion_heap_meta[i]);
+			break;
+		}
+	}
+	if (ret)
+		pr_err("%s: Unable to populate heap, error: %d\n", __func__, ret);
+	return ret;
+}
+
+static void free_pdata(const struct ion_platform_data *pdata)
+{
+	unsigned int i;
+	for (i = 0; i < pdata->nr; ++i)
+		kfree(pdata->heaps[i].extra_data);
+	kfree(pdata);
+}
+
+static int memtype_to_ion_memtype[] = {
+	[MEMTYPE_SMI_KERNEL] = ION_SMI_TYPE,
+	[MEMTYPE_SMI]	= ION_SMI_TYPE,
+	[MEMTYPE_EBI0] = ION_EBI_TYPE,
+	[MEMTYPE_EBI1] = ION_EBI_TYPE,
+};
+
+static void msm_ion_get_heap_align(struct device_node *node,
+				   struct ion_platform_heap *heap)
+{
+	unsigned int val;
+
+	int ret = of_property_read_u32(node, "qcom,heap-align", &val);
+	if (!ret) {
+		switch ((int) heap->type) {
+		case ION_HEAP_TYPE_CP:
+		{
+			struct ion_cp_heap_pdata *extra =
+						heap->extra_data;
+			extra->align = val;
+			break;
+		}
+		case ION_HEAP_TYPE_CARVEOUT:
+		{
+			struct ion_co_heap_pdata *extra =
+						heap->extra_data;
+			extra->align = val;
+			break;
+		}
+		default:
+			pr_err("ION-heap %s: Cannot specify alignment for this type of heap\n",
+					heap->name);
+			break;
+		}
+	}
+}
+
+static int msm_ion_get_heap_size(struct device_node *node,
+				 struct ion_platform_heap *heap)
+{
+	unsigned int val;
+	int ret = 0;
+	const char *memory_name_prop;
+
+	ret = of_property_read_u32(node, "qcom,memory-reservation-size", &val);
+	if (!ret) {
+		heap->size = val;
+		ret = of_property_read_string(node,
+					      "qcom,memory-reservation-type",
+					      &memory_name_prop);
+
+		if (!ret && memory_name_prop) {
+			int memtype =
+				msm_get_memory_type_from_name(memory_name_prop);
+
+			if (memtype < 0) {
+				ret = -EINVAL;
+				goto out;
+			}
+			heap->memory_type = memtype_to_ion_memtype[memtype];
+		}
+		if (heap->size && (ret || !memory_name_prop)) {
+			pr_err("%s: Need to specify reservation type\n",
+				__func__);
+			ret = -EINVAL;
+		}
+	} else {
+		ret = 0;
+	}
+out:
+	return ret;
+}
+
+static void msm_ion_get_heap_adjacent(struct device_node *node,
+				      struct ion_platform_heap *heap)
+{
+	unsigned int val;
+	int ret = of_property_read_u32(node, "qcom,heap-adjacent", &val);
+	if (!ret) {
+		switch (heap->type) {
+		case ION_HEAP_TYPE_CARVEOUT:
+		{
+			struct ion_co_heap_pdata *extra = heap->extra_data;
+			extra->adjacent_mem_id = val;
+			break;
+		}
+		default:
+			pr_err("ION-heap %s: Cannot specify adjacent mem id for this type of heap\n",
+				heap->name);
+			break;
+		}
+	} else {
+		switch (heap->type) {
+		case ION_HEAP_TYPE_CARVEOUT:
+		{
+			struct ion_co_heap_pdata *extra = heap->extra_data;
+			extra->adjacent_mem_id = INVALID_HEAP_ID;
+			break;
+		}
+		default:
+			break;
+		}
+	}
+}
+
+static struct ion_platform_data *msm_ion_parse_dt(
+					const struct device_node *dt_node)
+{
+	struct ion_platform_data *pdata = 0;
+	struct device_node *node;
+	uint32_t val = 0;
+	int ret = 0;
+	uint32_t num_heaps = 0;
+	int idx = 0;
+
+	for_each_child_of_node(dt_node, node)
+		num_heaps++;
+
+	if (!num_heaps)
+		return ERR_PTR(-EINVAL);
+
+	pdata = kzalloc(sizeof(struct ion_platform_data) +
+			num_heaps*sizeof(struct ion_platform_heap), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->nr = num_heaps;
+
+	for_each_child_of_node(dt_node, node) {
+		/*
+		 * TODO: Replace this with of_get_address() when this patch
+		 * gets merged:
+		 * http://permalink.gmane.org/gmane.linux.drivers.devicetree/18614
+		 */
+		ret = of_property_read_u32(node, "reg", &val);
+		if (ret) {
+			pr_err("%s: Unable to find reg key\n", __func__);
+			goto free_heaps;
+		}
+		pdata->heaps[idx].id = val;
+
+		ret = msm_ion_populate_heap(&pdata->heaps[idx]);
+		if (ret)
+			goto free_heaps;
+
+		msm_ion_get_heap_align(node, &pdata->heaps[idx]);
+
+		ret = msm_ion_get_heap_size(node, &pdata->heaps[idx]);
+		if (ret)
+			goto free_heaps;
+
+		msm_ion_get_heap_adjacent(node, &pdata->heaps[idx]);
+
+		++idx;
+	}
+	return pdata;
+
+free_heaps:
+	free_pdata(pdata);
+	return ERR_PTR(ret);
+}
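For orientation, a hypothetical devicetree node shaped the way msm_ion_parse_dt() expects. Only the property keys and the "qcom,msm-ion" compatible come from this patch; the node names, heap id, alignment, reservation size, and the "EBI1" type string are all illustrative assumptions:

qcom,ion {
	compatible = "qcom,msm-ion";

	qcom,ion-heap@8 {
		reg = <8>;				/* hypothetical heap id */
		qcom,heap-align = <0x100000>;		/* hypothetical */
		qcom,memory-reservation-type = "EBI1";	/* assumed type name */
		qcom,memory-reservation-size = <0x2000000>;
	};
};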
+
+static int check_vaddr_bounds(unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm = current->active_mm;
+	struct vm_area_struct *vma;
+	int ret = 1;
+
+	if (end < start)
+		goto out;
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, start);
+	if (vma && vma->vm_start < end) {
+		if (start < vma->vm_start)
+			goto out_up;
+		if (end > vma->vm_end)
+			goto out_up;
+		ret = 0;
+	}
+
+out_up:
+	up_read(&mm->mmap_sem);
+out:
+	return ret;
+}
+
+static long msm_ion_custom_ioctl(struct ion_client *client,
+				unsigned int cmd,
+				unsigned long arg)
+{
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+	case ION_IOC_INV_CACHES:
+	case ION_IOC_CLEAN_INV_CACHES:
+	{
+		struct ion_flush_data data;
+		unsigned long start, end;
+		struct ion_handle *handle = NULL;
+		int ret;
+
+		if (copy_from_user(&data, (void __user *)arg,
+					sizeof(struct ion_flush_data)))
+			return -EFAULT;
+
+		start = (unsigned long) data.vaddr;
+		end = (unsigned long) data.vaddr + data.length;
+
+		if (check_vaddr_bounds(start, end)) {
+			pr_err("%s: virtual address %p is out of bounds\n",
+				__func__, data.vaddr);
+			return -EINVAL;
+		}
+
+		if (!data.handle) {
+			handle = ion_import_dma_buf(client, data.fd);
+			if (IS_ERR(handle)) {
+				pr_info("%s: Could not import handle: %ld\n",
+					__func__, PTR_ERR(handle));
+				return -EINVAL;
+			}
+		}
+
+		ret = ion_do_cache_op(client,
+				data.handle ? data.handle : handle,
+				data.vaddr, data.offset, data.length,
+				cmd);
+
+		if (!data.handle)
+			ion_free(client, handle);
+
+		if (ret < 0)
+			return ret;
+		break;
+
+	}
+	case ION_IOC_GET_FLAGS:
+	{
+		struct ion_flag_data data;
+		int ret;
+		if (copy_from_user(&data, (void __user *)arg,
+					sizeof(struct ion_flag_data)))
+			return -EFAULT;
+
+		ret = ion_handle_get_flags(client, data.handle, &data.flags);
+		if (ret < 0)
+			return ret;
+		if (copy_to_user((void __user *)arg, &data,
+					sizeof(struct ion_flag_data)))
+			return -EFAULT;
+		break;
+	}
+	default:
+		return -ENOTTY;
+	}
+	return 0;
+}
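From userspace, the three cache commands reach this handler through ion's custom-ioctl path on the ion device fd. A hedged sketch follows; the uapi header location and ioctl plumbing are assumptions, while the ion_flush_data fields mirror the handler above:

#include <sys/ioctl.h>
#include <linux/msm_ion.h>	/* assumed location of the uapi definitions */

/* clean CPU caches for an ion buffer mapped at vaddr */
static int example_clean_caches(int ion_dev_fd, int buf_fd,
				void *vaddr, unsigned int length)
{
	struct ion_flush_data data = {
		.fd = buf_fd,	/* imported by the handler when .handle is NULL */
		.vaddr = vaddr,
		.offset = 0,
		.length = length,
	};

	return ioctl(ion_dev_fd, ION_IOC_CLEAN_CACHES, &data);
}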
+
 static int msm_ion_probe(struct platform_device *pdev)
 {
-	struct ion_platform_data *pdata = pdev->dev.platform_data;
-	int err;
+	struct ion_platform_data *pdata;
+	unsigned int pdata_needs_to_be_freed;
+	int err = -1;
 	int i;
+	if (pdev->dev.of_node) {
+		pdata = msm_ion_parse_dt(pdev->dev.of_node);
+		if (IS_ERR(pdata)) {
+			err = PTR_ERR(pdata);
+			goto out;
+		}
+		pdata_needs_to_be_freed = 1;
+	} else {
+		pdata = pdev->dev.platform_data;
+		pdata_needs_to_be_freed = 0;
+	}
 
 	num_heaps = pdata->nr;
 
@@ -275,7 +684,7 @@
 		goto out;
 	}
 
-	idev = ion_device_create(NULL);
+	idev = ion_device_create(msm_ion_custom_ioctl);
 	if (IS_ERR_OR_NULL(idev)) {
 		err = PTR_ERR(idev);
 		goto freeheaps;
@@ -283,7 +692,7 @@
 
 	msm_ion_heap_fixup(pdata->heaps, num_heaps);
 
-	
+	/* create the heaps as specified in the board file */
 	for (i = 0; i < num_heaps; i++) {
 		struct ion_platform_heap *heap_data = &pdata->heaps[i];
 		msm_ion_allocate(heap_data);
@@ -306,6 +715,8 @@
 
 		ion_device_add_heap(idev, heaps[i]);
 	}
+	if (pdata_needs_to_be_freed)
+		free_pdata(pdata);
 
 	check_for_heap_overlap(pdata->heaps, num_heaps);
 	platform_set_drvdata(pdev, idev);
@@ -313,6 +724,8 @@
 
 freeheaps:
 	kfree(heaps);
+	if (pdata_needs_to_be_freed)
+		free_pdata(pdata);
 out:
 	return err;
 }
@@ -330,10 +743,19 @@
 	return 0;
 }
 
+static struct of_device_id msm_ion_match_table[] = {
+	{.compatible = ION_COMPAT_STR},
+	{},
+};
+EXPORT_COMPAT(ION_COMPAT_MEM_RESERVE_STR);
+
 static struct platform_driver msm_ion_driver = {
 	.probe = msm_ion_probe,
 	.remove = msm_ion_remove,
-	.driver = { .name = "ion-msm" }
+	.driver = {
+		.name = "ion-msm",
+		.of_match_table = msm_ion_match_table,
+	},
 };
 
 static int __init msm_ion_init(void)
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c
new file mode 100644
index 0000000..7af6e16
--- /dev/null
+++ b/drivers/gpu/ion/tegra/tegra_ion.c
@@ -0,0 +1,96 @@
+/*
+ * drivers/gpu/tegra/tegra_ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "../ion_priv.h"
+
+struct ion_device *idev;
+struct ion_mapper *tegra_user_mapper;
+int num_heaps;
+struct ion_heap **heaps;
+
+int tegra_ion_probe(struct platform_device *pdev)
+{
+	struct ion_platform_data *pdata = pdev->dev.platform_data;
+	int err;
+	int i;
+
+	num_heaps = pdata->nr;
+
+	heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+	if (!heaps)
+		return -ENOMEM;
+
+	idev = ion_device_create(NULL);
+	if (IS_ERR_OR_NULL(idev)) {
+		kfree(heaps);
+		return PTR_ERR(idev);
+	}
+
+	/* create the heaps as specified in the board file */
+	for (i = 0; i < num_heaps; i++) {
+		struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+		heaps[i] = ion_heap_create(heap_data);
+		if (IS_ERR_OR_NULL(heaps[i])) {
+			err = PTR_ERR(heaps[i]);
+			goto err;
+		}
+		ion_device_add_heap(idev, heaps[i]);
+	}
+	platform_set_drvdata(pdev, idev);
+	return 0;
+err:
+	for (i = 0; i < num_heaps; i++) {
+		if (heaps[i])
+			ion_heap_destroy(heaps[i]);
+	}
+	kfree(heaps);
+	return err;
+}
+
+int tegra_ion_remove(struct platform_device *pdev)
+{
+	struct ion_device *idev = platform_get_drvdata(pdev);
+	int i;
+
+	ion_device_destroy(idev);
+	for (i = 0; i < num_heaps; i++)
+		ion_heap_destroy(heaps[i]);
+	kfree(heaps);
+	return 0;
+}
+
+static struct platform_driver ion_driver = {
+	.probe = tegra_ion_probe,
+	.remove = tegra_ion_remove,
+	.driver = { .name = "ion-tegra" }
+};
+
+static int __init ion_init(void)
+{
+	return platform_driver_register(&ion_driver);
+}
+
+static void __exit ion_exit(void)
+{
+	platform_driver_unregister(&ion_driver);
+}
+
+module_init(ion_init);
+module_exit(ion_exit);
+
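For completeness, a hedged sketch of the board-side registration a driver like this expects. The flexible-array layout of ion_platform_data is inferred from the kzalloc sizing in msm_ion_parse_dt() above, and the heap id and name are placeholders:

static struct ion_platform_data example_ion_pdata = {
	.nr = 1,
	.heaps = {
		{
			.id	= 0,			/* placeholder id */
			.type	= ION_HEAP_TYPE_SYSTEM,
			.name	= "vmalloc",		/* placeholder name */
		},
	},
};

static struct platform_device example_ion_device = {
	.name = "ion-tegra",	/* must match the platform driver name */
	.id = -1,
	.dev = { .platform_data = &example_ion_pdata },
};

Registering example_ion_device from board init would then drive tegra_ion_probe() with the heaps above.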