gpu: ion: Add cache flushing APIs

Add ioctls (ION_IOC_CLEAN_CACHES, ION_IOC_INV_CACHES and
ION_IOC_CLEAN_INV_CACHES) to support cleaning, invalidating and
flushing the caches of ion buffers from userspace, backed by a
new cache_op heap operation implemented for the system, system
contig and carveout heaps.
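
A rough sketch of the intended userspace usage (illustrative only;
'ion_fd' is the client's /dev/ion file descriptor, 'handle' comes from
ION_IOC_ALLOC, and 'vaddr'/'len' describe the caller's mmap()ed view
of the buffer):

	#include <sys/ioctl.h>
	#include <linux/ion.h>

	static int clean_and_inv_ion_buffer(int ion_fd,
					    struct ion_handle *handle,
					    void *vaddr, unsigned int len)
	{
		struct ion_flush_data data = {
			.handle = handle,
			.vaddr = vaddr,
			.offset = 0,
			.length = len,
		};

		/* clean and invalidate the whole mapped region */
		return ioctl(ion_fd, ION_IOC_CLEAN_INV_CACHES, &data);
	}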

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index de75935..a9dfc60 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -587,6 +587,80 @@
 	return handle;
 }
 
+static int check_vaddr_bounds(unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm = current->active_mm;
+	struct vm_area_struct *vma;
+	int ret = 1;
+
+	if (end < start)
+		goto out;
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, start);
+	if (vma && vma->vm_start < end) {
+		if (start < vma->vm_start)
+			goto out_up;
+		if (end > vma->vm_end)
+			goto out_up;
+		ret = 0;
+	}
+
+out_up:
+	up_read(&mm->mmap_sem);
+out:
+	return ret;
+}
+
+int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			void *uaddr, unsigned long offset, unsigned long len,
+			unsigned int cmd)
+{
+	struct ion_buffer *buffer;
+	unsigned long start, end;
+	int ret = -EINVAL;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to do_cache_op.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+
+	if (!ION_IS_CACHED(buffer->flags)) {
+		ret = 0;
+		goto out;
+	}
+
+	if (!buffer->heap->ops->cache_op) {
+		pr_err("%s: cache_op is not implemented by this heap.\n",
+		       __func__);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	start = (unsigned long) uaddr;
+	end = (unsigned long) uaddr + len;
+
+	if (check_vaddr_bounds(start, end)) {
+		pr_err("%s: virtual address %p is out of bounds\n",
+			__func__, uaddr);
+		goto out;
+	}
+
+	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
+						offset, len, cmd);
+
+out:
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+	return ret;
+
+}
+
 static const struct file_operations ion_share_fops;
 
 struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
@@ -1075,6 +1149,20 @@
 			return -EFAULT;
 		return dev->custom_ioctl(client, data.cmd, data.arg);
 	}
+	case ION_IOC_CLEAN_CACHES:
+	case ION_IOC_INV_CACHES:
+	case ION_IOC_CLEAN_INV_CACHES:
+	{
+		struct ion_flush_data data;
+
+		if (copy_from_user(&data, (void __user *)arg,
+				sizeof(struct ion_flush_data)))
+			return -EFAULT;
+
+		return ion_do_cache_op(client, data.handle, data.vaddr,
+					data.offset, data.length, cmd);
+
+	}
 	default:
 		return -ENOTTY;
 	}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 5bd18e2..44536c8 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -129,6 +129,32 @@
 					pgprot_noncached(vma->vm_page_prot));
 }
 
+int ion_carveout_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
+			void *vaddr, unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	unsigned long vstart, pstart;
+
+	pstart = buffer->priv_phys + offset;
+	vstart = (unsigned long)vaddr;
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+		clean_caches(vstart, length, pstart);
+		break;
+	case ION_IOC_INV_CACHES:
+		invalidate_caches(vstart, length, pstart);
+		break;
+	case ION_IOC_CLEAN_INV_CACHES:
+		clean_and_invalidate_caches(vstart, length, pstart);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct ion_heap_ops carveout_heap_ops = {
 	.allocate = ion_carveout_heap_allocate,
 	.free = ion_carveout_heap_free,
@@ -136,6 +162,7 @@
 	.map_user = ion_carveout_heap_map_user,
 	.map_kernel = ion_carveout_heap_map_kernel,
 	.unmap_kernel = ion_carveout_heap_unmap_kernel,
+	.cache_op = ion_carveout_cache_ops,
 };
 
 struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 581abe5..fd5c125 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -101,6 +101,9 @@
 	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
 	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
 			 struct vm_area_struct *vma, unsigned long flags);
+	int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
+			void *vaddr, unsigned int offset,
+			unsigned int length, unsigned int cmd);
 };
 
 /**
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index 022edf6..5609b72 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include "ion_priv.h"
+#include <mach/memory.h>
 
 static int ion_system_heap_allocate(struct ion_heap *heap,
 				     struct ion_buffer *buffer,
@@ -103,6 +104,51 @@
 	}
 }
 
+int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
+			void *vaddr, unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	unsigned long vstart, pstart, ln = 0;
+	void *vtemp;
+	struct page *page;
+	void (*op)(unsigned long, unsigned long, unsigned long);
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+		op = clean_caches;
+		break;
+	case ION_IOC_INV_CACHES:
+		op = invalidate_caches;
+		break;
+	case ION_IOC_CLEAN_INV_CACHES:
+		op = clean_and_invalidate_caches;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	for (vtemp = buffer->priv_virt + offset,
+	     vstart = (unsigned long) vaddr;
+			ln < length;
+			vtemp += PAGE_SIZE, ln += PAGE_SIZE,
+			vstart += PAGE_SIZE) {
+		page = vmalloc_to_page(vtemp);
+		/*
+		 * If the vmalloc address cannot be translated back to a
+		 * page, something has gone badly wrong; don't hand a
+		 * bogus physical address to the cache operation.
+		 */
+		if (WARN(!page, "Could not translate %p to physical address\n",
+			 vtemp))
+			return -EINVAL;
+		pstart = page_to_phys(page);
+
+		op(vstart, PAGE_SIZE, pstart);
+	}
+
+	return 0;
+}
+
 static struct ion_heap_ops vmalloc_ops = {
 	.allocate = ion_system_heap_allocate,
 	.free = ion_system_heap_free,
@@ -111,6 +157,7 @@
 	.map_kernel = ion_system_heap_map_kernel,
 	.unmap_kernel = ion_system_heap_unmap_kernel,
 	.map_user = ion_system_heap_map_user,
+	.cache_op = ion_system_heap_cache_ops,
 };
 
 struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
@@ -186,6 +233,39 @@
 	}
 }
 
+int ion_system_contig_heap_cache_ops(struct ion_heap *heap,
+			struct ion_buffer *buffer, void *vaddr,
+			unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	unsigned long vstart, pstart;
+
+	pstart = virt_to_phys(buffer->priv_virt) + offset;
+	if (!pstart) {
+		WARN(1, "Could not do virt to phys translation on %p\n",
+			buffer->priv_virt);
+		return -EINVAL;
+	}
+
+	vstart = (unsigned long) vaddr;
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+		clean_caches(vstart, length, pstart);
+		break;
+	case ION_IOC_INV_CACHES:
+		invalidate_caches(vstart, length, pstart);
+		break;
+	case ION_IOC_CLEAN_INV_CACHES:
+		clean_and_invalidate_caches(vstart, length, pstart);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct ion_heap_ops kmalloc_ops = {
 	.allocate = ion_system_contig_heap_allocate,
 	.free = ion_system_contig_heap_free,
@@ -195,6 +275,7 @@
 	.map_kernel = ion_system_heap_map_kernel,
 	.unmap_kernel = ion_system_heap_unmap_kernel,
 	.map_user = ion_system_contig_heap_map_user,
+	.cache_op = ion_system_contig_heap_cache_ops,
 };
 
 struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
diff --git a/include/linux/ion.h b/include/linux/ion.h
index 15ba708..df44376 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -17,9 +17,11 @@
 #ifndef _LINUX_ION_H
 #define _LINUX_ION_H
 
+#include <linux/ioctl.h>
 #include <linux/types.h>
 #include <mach/ion.h>
 
+
 struct ion_handle;
 /**
  * enum ion_heap_types - list of all possible types of heaps
@@ -330,6 +332,24 @@
 	unsigned long arg;
 };
 
+
+/**
+ * struct ion_flush_data - data passed to ion for cache maintenance
+ *
+ * @handle:	handle with data to flush
+ * @vaddr:	userspace virtual address at which the handle is mapped
+ * @offset:	offset into the handle at which to start flushing
+ * @length:	length of the region to flush
+ *
+ * If p is the start address of the handle, the range from p + offset
+ * to p + offset + length has the cache operation performed on it.
+ */
+struct ion_flush_data {
+	struct ion_handle *handle;
+	void *vaddr;
+	unsigned int offset;
+	unsigned int length;
+};
 #define ION_IOC_MAGIC		'I'
 
 /**
@@ -386,4 +406,26 @@
  */
 #define ION_IOC_CUSTOM		_IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
 
+
+/**
+ * DOC: ION_IOC_CLEAN_CACHES - clean the caches
+ *
+ * Clean the caches of the handle specified.
+ */
+#define ION_IOC_CLEAN_CACHES	_IOWR(ION_IOC_MAGIC, 7, \
+						struct ion_flush_data)
+/**
+ * DOC: ION_IOC_INV_CACHES - invalidate the caches
+ *
+ * Invalidate the caches of the handle specified.
+ */
+#define ION_IOC_INV_CACHES	_IOWR(ION_IOC_MAGIC, 8, \
+						struct ion_flush_data)
+/**
+ * DOC: ION_IOC_CLEAN_INV_CACHES - clean and invalidate the caches
+ *
+ * Clean and invalidate the caches of the handle specified.
+ */
+#define ION_IOC_CLEAN_INV_CACHES	_IOWR(ION_IOC_MAGIC, 9, \
+						struct ion_flush_data)
 #endif /* _LINUX_ION_H */