gpu: ion: Add ION support for fmem

FMEM allows ION to release memory for other
purposes when no ION clients are using the
memory.

Add a new heap type for fmem that makes calls
into the fmem API to transition memory from
the T state to the C state and vice versa.
Add fmem support to the content protection
heap.
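
For reference, the state-transition pattern the
heap relies on is sketched below (illustrative
only, condensed from the hunks in this patch;
locking and error paths trimmed):

	/* A reusable heap holds fmem in the C state while ION
	 * owns the memory (first allocation, or while the heap
	 * is protected)... */
	if (cp_heap->reusable && !cp_heap->allocated_bytes)
		if (fmem_set_state(FMEM_C_STATE) != 0)
			return ION_RESERVED_ALLOCATE_FAIL;

	/* ...and returns it to the T state once the last buffer
	 * is freed, so the memory can be released for other
	 * purposes. */
	if (cp_heap->reusable && !cp_heap->allocated_bytes)
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("unable to transition heap to T-state\n");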

Change-Id: I95f949b57c99949e6eafe6a6f5fd147bdaa264f6
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
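
Note on kernel mappings: reusable heaps do not
use plain ioremap(); buffers are mapped at a
fixed offset inside the heap's pre-reserved
virtual range via the new ion_map_fmem_buffer()
helper and torn down with unmap_kernel_range().
A hypothetical caller in another heap that wants
fmem behavior (my_heap and its fields are
illustrative, not part of this patch):

	/* map_kernel op: the reserved virtual range must be
	 * pre-allocated (see the kernel-doc in ion_priv.h). */
	buffer->vaddr = ion_map_fmem_buffer(buffer, my_heap->base,
					    my_heap->reserved_vrange,
					    flags);
	if (!buffer->vaddr)
		return NULL;

	/* unmap_kernel op: the mapping was built with
	 * ioremap_page_range(), so use unmap_kernel_range()
	 * rather than iounmap(). */
	unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
	buffer->vaddr = NULL;
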
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index 8ccc8b6..fc1cfb6 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -26,6 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/memory_alloc.h>
 #include <linux/seq_file.h>
+#include <linux/fmem.h>
 #include <mach/msm_memtypes.h>
 #include <mach/scm.h>
 #include "ion_priv.h"
@@ -57,6 +58,8 @@
  *			kernel space (un-cached).
  * @umap_count:	the total number of times this heap has been mapped in
  *		user space.
+ * @reusable: indicates if the memory should be reused via fmem.
+ * @reserved_vrange: reserved virtual address range for use with fmem
  */
 struct ion_cp_heap {
 	struct ion_heap heap;
@@ -75,6 +78,8 @@
 	unsigned long kmap_cached_count;
 	unsigned long kmap_uncached_count;
 	unsigned long umap_count;
+	int reusable;
+	void *reserved_vrange;
 };
 
 enum {
@@ -99,7 +104,8 @@
 }
 
 /**
- * Protects memory if heap is unsecured heap.
+ * Protects memory if the heap is an unsecured heap. Also ensures that the
+ * heap is in the correct FMEM state if it is a reusable heap.
  * Must be called with heap->lock locked.
  */
 static int ion_cp_protect(struct ion_heap *heap)
@@ -109,22 +115,37 @@
 	int ret_value = 0;
 
 	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
+		/* Make sure we are in C state when the heap is protected. */
+		if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+			ret_value = fmem_set_state(FMEM_C_STATE);
+			if (ret_value)
+				goto out;
+		}
+
 		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
 				cp_heap->secure_size, cp_heap->permission_type);
 		if (ret_value) {
 			pr_err("Failed to protect memory for heap %s - "
 				"error code: %d\n", heap->name, ret_value);
+
+			if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+				if (fmem_set_state(FMEM_T_STATE) != 0)
+					pr_err("%s: unable to transition heap to T-state\n",
+						__func__);
+			}
 		} else {
 			cp_heap->heap_protected = HEAP_PROTECTED;
-			pr_debug("Protected heap %s @ 0x%x\n",
-				heap->name, (unsigned int) cp_heap->base);
+			pr_debug("Protected heap %s @ 0x%lx\n",
+				heap->name, cp_heap->base);
 		}
 	}
+out:
 	return ret_value;
 }
 
 /**
- * Unprotects memory if heap is secure heap.
+ * Unprotects memory if the heap is a secure heap. Also ensures that the
+ * heap is in the correct FMEM state if it is a reusable heap.
  * Must be called with heap->lock locked.
  */
 static void ion_cp_unprotect(struct ion_heap *heap)
@@ -143,6 +164,12 @@
 			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
 			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
 				(unsigned int) cp_heap->base);
+
+			if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+				if (fmem_set_state(FMEM_T_STATE) != 0)
+					pr_err("%s: unable to transition heap to T-state\n",
+						__func__);
+			}
 		}
 	}
 }
@@ -176,6 +203,17 @@
 		return ION_CP_ALLOCATE_FAIL;
 	}
 
+	/*
+	 * If this is the first allocation from a reusable heap,
+	 * transition the heap to the C state.
+	 */
+	if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+		if (fmem_set_state(FMEM_C_STATE) != 0) {
+			mutex_unlock(&cp_heap->lock);
+			return ION_RESERVED_ALLOCATE_FAIL;
+		}
+	}
+
 	cp_heap->allocated_bytes += size;
 	mutex_unlock(&cp_heap->lock);
 
@@ -194,6 +232,12 @@
 				cp_heap->allocated_bytes, size);
 
 		cp_heap->allocated_bytes -= size;
+
+		if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+			if (fmem_set_state(FMEM_T_STATE) != 0)
+				pr_err("%s: unable to transition heap to T-state\n",
+					__func__);
+		}
 		mutex_unlock(&cp_heap->lock);
 
 		return ION_CP_ALLOCATE_FAIL;
@@ -214,6 +258,12 @@
 
 	mutex_lock(&cp_heap->lock);
 	cp_heap->allocated_bytes -= size;
+
+	if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+		if (fmem_set_state(FMEM_T_STATE) != 0)
+			pr_err("%s: unable to transition heap to T-state\n",
+				__func__);
+	}
 	mutex_unlock(&cp_heap->lock);
 }
 
@@ -293,6 +343,28 @@
 	return ret_value;
 }
 
+void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
+				void *virt_base, unsigned long flags)
+{
+	int ret;
+	unsigned int offset = buffer->priv_phys - phys_base;
+	unsigned long start = ((unsigned long)virt_base) + offset;
+	const struct mem_type *type = ION_IS_CACHED(flags) ?
+				get_mem_type(MT_DEVICE_CACHED) :
+				get_mem_type(MT_DEVICE);
+
+	if (phys_base > buffer->priv_phys)
+		return NULL;
+
+	ret = ioremap_page_range(start, start + buffer->size,
+			buffer->priv_phys, __pgprot(type->prot_pte));
+
+	if (!ret)
+		return (void *)start;
+	else
+		return NULL;
+}
+
 void *ion_cp_heap_map_kernel(struct ion_heap *heap,
 				   struct ion_buffer *buffer,
 				   unsigned long flags)
@@ -311,11 +384,17 @@
 			return NULL;
 		}
 
-		if (ION_IS_CACHED(flags))
-			ret_value = ioremap_cached(buffer->priv_phys,
-						   buffer->size);
-		else
-			ret_value = ioremap(buffer->priv_phys, buffer->size);
+		if (cp_heap->reusable) {
+			ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
+					cp_heap->reserved_vrange, flags);
+		} else {
+			if (ION_IS_CACHED(flags))
+				ret_value = ioremap_cached(buffer->priv_phys,
+							   buffer->size);
+			else
+				ret_value = ioremap(buffer->priv_phys,
+						    buffer->size);
+		}
 
 		if (!ret_value) {
 			ion_cp_release_region(cp_heap);
@@ -336,7 +416,11 @@
 	struct ion_cp_heap *cp_heap =
 		container_of(heap, struct ion_cp_heap, heap);
 
-	__arch_iounmap(buffer->vaddr);
+	if (cp_heap->reusable)
+		unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
+	else
+		__arch_iounmap(buffer->vaddr);
+
 	buffer->vaddr = NULL;
 
 	mutex_lock(&cp_heap->lock);
@@ -447,6 +531,7 @@
 	seq_printf(s, "umapping count: %lx\n", umap_count);
 	seq_printf(s, "kmapping count: %lx\n", kmap_count);
 	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
+	seq_printf(s, "reusable: %s\n", cp_heap->reusable ? "Yes" : "No");
 
 	return 0;
 }
@@ -531,6 +616,8 @@
 	if (heap_data->extra_data) {
 		struct ion_cp_heap_pdata *extra_data =
 				heap_data->extra_data;
+		cp_heap->reusable = extra_data->reusable;
+		cp_heap->reserved_vrange = extra_data->virt_addr;
 		cp_heap->permission_type = extra_data->permission_type;
 		if (extra_data->secure_size) {
 			cp_heap->secure_base = extra_data->secure_base;
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 78dfe6e..1d40aef 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -230,6 +230,9 @@
 struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *);
 void ion_cp_heap_destroy(struct ion_heap *);
 
+struct ion_heap *ion_reusable_heap_create(struct ion_platform_heap *);
+void ion_reusable_heap_destroy(struct ion_heap *);
+
 /**
  * kernel api to allocate/free from carveout -- used when carveout is
  * used to back an architecture specific custom heap
@@ -248,4 +251,24 @@
 #define ION_CARVEOUT_ALLOCATE_FAIL -1
 #define ION_CP_ALLOCATE_FAIL -1
 
+/**
+ * The reserved heap returns physical addresses. Since 0 may be a valid
+ * physical address, this value is used to indicate that an allocation failed.
+ */
+#define ION_RESERVED_ALLOCATE_FAIL -1
+
+/**
+ * ion_map_fmem_buffer - map fmem-allocated memory into the kernel
+ * @buffer:	buffer to map
+ * @phys_base:	physical base of the heap
+ * @virt_base:	virtual base of the heap
+ * @flags:	flags for the heap
+ *
+ * Map fmem allocated memory into the kernel address space. This
+ * is designed to be used by other heaps that need fmem behavior.
+ * The virtual range must be pre-allocated.
+ */
+void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
+				void *virt_base, unsigned long flags);
+
 #endif /* _ION_PRIV_H */
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index f71f514..2a2892e 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -15,6 +15,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/memory_alloc.h>
+#include <linux/fmem.h>
 #include <mach/ion.h>
 #include <mach/msm_memtypes.h>
 #include "../ion_priv.h"
@@ -86,10 +87,20 @@
 		if (shared_heap) {
 			struct ion_cp_heap_pdata *cp_data =
 			   (struct ion_cp_heap_pdata *) shared_heap->extra_data;
-			heap->base = msm_ion_get_base(
-				heap->size + shared_heap->size,
-				shared_heap->memory_type,
-				co_heap_data->align);
+			if (cp_data->reusable) {
+				const struct fmem_data *fmem_info =
+					fmem_get_info();
+				heap->base = fmem_info->phys -
+					     fmem_info->reserved_size;
+				cp_data->virt_addr = fmem_info->virt;
+				pr_info("ION heap %s using FMEM\n",
+							shared_heap->name);
+			} else {
+				heap->base = msm_ion_get_base(
+					heap->size + shared_heap->size,
+					shared_heap->memory_type,
+					co_heap_data->align);
+			}
 			if (heap->base) {
 				shared_heap->base = heap->base + heap->size;
 				cp_data->secure_base = heap->base;
@@ -138,13 +149,24 @@
 			((struct ion_co_heap_pdata *) heap->extra_data)->align;
 			break;
 		case ION_HEAP_TYPE_CP:
-			align =
-			((struct ion_cp_heap_pdata *) heap->extra_data)->align;
+		{
+			struct ion_cp_heap_pdata *data =
+				(struct ion_cp_heap_pdata *)
+				heap->extra_data;
+			if (data->reusable) {
+				const struct fmem_data *fmem_info =
+					fmem_get_info();
+				heap->base = fmem_info->phys;
+				data->virt_addr = fmem_info->virt;
+				pr_info("ION heap %s using FMEM\n", heap->name);
+			}
+			align = data->align;
 			break;
+		}
 		default:
 			break;
 		}
-		if (align) {
+		if (align && !heap->base) {
 			heap->base = msm_ion_get_base(heap->size,
 						      heap->memory_type,
 						      align);
diff --git a/include/linux/ion.h b/include/linux/ion.h
index 41f99e4..ade87e6 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -99,6 +99,7 @@
 #define ION_WB_HEAP_NAME	"wb"
 #define ION_MM_FIRMWARE_HEAP_NAME	"mm_fw"
 #define ION_QSECOM_HEAP_NAME	"qsecom"
+#define ION_FMEM_HEAP_NAME	"fmem"
 
 #define CACHED          1
 #define UNCACHED        0
@@ -123,6 +124,7 @@
    be converted to phys_addr_t.  For the time being many kernel interfaces
    do not accept phys_addr_t's that would have to */
 #define ion_phys_addr_t unsigned long
+#define ion_virt_addr_t unsigned long
 
 /**
  * struct ion_platform_heap - defines a heap in the given platform
@@ -132,15 +134,8 @@
  * @name:	used for debug purposes
  * @base:	base address of heap in physical memory if applicable
  * @size:	size of the heap in bytes if applicable
- * @memory_type:	Memory type used for the heap
- * @ion_memory_id:		Memory ID used to identify the memory to TZ
- * @request_region: function to be called when the number of allocations goes
- *						from 0 -> 1
- * @release_region: function to be called when the number of allocations goes
- *						from 1 -> 0
- * @setup_region:   function to be called upon ion registration
- *
- * Provided by the board file.
+ * @memory_type:	Memory type used for the heap
+ * @extra_data:	Extra data specific to each heap type
  */
 struct ion_platform_heap {
 	enum ion_heap_type type;
@@ -152,16 +147,50 @@
 	void *extra_data;
 };
 
+/**
+ * struct ion_cp_heap_pdata - defines a content protection heap in the given
+ * platform
+ * @permission_type:	Memory ID used to identify the memory to TZ
+ * @align:		Alignment requirement for the memory
+ * @secure_base:	Base address for securing the heap.
+ *			Note: This might be different from actual base address
+ *			of this heap in the case of a shared heap.
+ * @secure_size:	Memory size for securing the heap.
+ *			Note: This might be different from actual size
+ *			of this heap in the case of a shared heap.
+ * @reusable:		Flag indicating whether this heap is reusable or not.
+ *			(see FMEM)
+ * @virt_addr:		Virtual address used when using fmem.
+ * @request_region:	function to be called when the number of allocations
+ *			goes from 0 -> 1
+ * @release_region:	function to be called when the number of allocations
+ *			goes from 1 -> 0
+ * @setup_region:	function to be called upon ion registration
+ *
+ */
 struct ion_cp_heap_pdata {
 	enum ion_permission_type permission_type;
 	unsigned int align;
 	ion_phys_addr_t secure_base; /* Base addr used when heap is shared */
 	size_t secure_size; /* Size used for securing heap when heap is shared*/
+	int reusable;
+	ion_virt_addr_t *virt_addr;
 	int (*request_region)(void *);
 	int (*release_region)(void *);
 	void *(*setup_region)(void);
 };
 
+/**
+ * struct ion_co_heap_pdata - defines a carveout heap in the given platform
+ * @adjacent_mem_id:	Id of heap that this heap must be adjacent to.
+ * @align:		Alignment requirement for the memory
+ * @request_region:	function to be called when the number of allocations
+ *			goes from 0 -> 1
+ * @release_region:	function to be called when the number of allocations
+ *			goes from 1 -> 0
+ * @setup_region:	function to be called upon ion registration
+ *
+ */
 struct ion_co_heap_pdata {
 	int adjacent_mem_id;
 	unsigned int align;