gpu: ion: Add support for cached and uncached mappings

Add explicit support for cached and uncached mappings. The kernel,
dma and userspace mapping functions now take a flags argument that
specifies whether the mapping should be cached or uncached, and a
buffer already mapped one way cannot be remapped the other way.

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 37b23af..d4ca762 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -407,7 +407,8 @@
 	return ret;
 }
 
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
+			unsigned long flags)
 {
 	struct ion_buffer *buffer;
 	void *vaddr;
@@ -431,21 +432,38 @@
 		return ERR_PTR(-ENODEV);
 	}
 
+	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
+		if (buffer->flags != flags) {
+			pr_err("%s: buffer was already mapped with flags %lx,"
+				" cannot map with flags %lx\n", __func__,
+				buffer->flags, flags);
+			vaddr = ERR_PTR(-EEXIST);
+			goto out;
+		}
+
+	} else {
+		buffer->flags = flags;
+	}
+
 	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
-		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer,
+							flags);
 		if (IS_ERR_OR_NULL(vaddr))
 			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
 		buffer->vaddr = vaddr;
 	} else {
 		vaddr = buffer->vaddr;
 	}
+
+out:
 	mutex_unlock(&buffer->lock);
 	mutex_unlock(&client->lock);
 	return vaddr;
 }
 
 struct scatterlist *ion_map_dma(struct ion_client *client,
-				struct ion_handle *handle)
+				struct ion_handle *handle,
+				unsigned long flags)
 {
 	struct ion_buffer *buffer;
 	struct scatterlist *sglist;
@@ -467,6 +485,20 @@
 		mutex_unlock(&client->lock);
 		return ERR_PTR(-ENODEV);
 	}
+
+	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
+		if (buffer->flags != flags) {
+			pr_err("%s: buffer was already mapped with flags %lx,"
+				" cannot map with flags %lx\n", __func__,
+				buffer->flags, flags);
+			sglist = ERR_PTR(-EEXIST);
+			goto out;
+		}
+
+	} else {
+		buffer->flags = flags;
+	}
+
 	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
 		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
 		if (IS_ERR_OR_NULL(sglist))
@@ -475,6 +507,8 @@
 	} else {
 		sglist = buffer->sglist;
 	}
+
+out:
 	mutex_unlock(&buffer->lock);
 	mutex_unlock(&client->lock);
 	return sglist;
@@ -774,6 +808,9 @@
 	struct ion_buffer *buffer = file->private_data;
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
+	mutex_lock(&buffer->lock);
+	buffer->umap_cnt--;
+	mutex_unlock(&buffer->lock);
 	/* drop the reference to the buffer -- this prevents the
 	   buffer from going away because the client holding it exited
 	   while it was being passed */
@@ -840,6 +877,9 @@
 	struct ion_client *client;
 	struct ion_handle *handle;
 	int ret;
+	unsigned long flags = file->f_flags & O_DSYNC ?
+				ION_SET_CACHE(UNCACHED) :
+				ION_SET_CACHE(CACHED);
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
 	/* make sure the client still exists, it's possible for the client to
@@ -875,13 +916,28 @@
 	}
 
 	mutex_lock(&buffer->lock);
+	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
+		if (buffer->flags != flags) {
+			pr_err("%s: buffer was already mapped with flags %lx,"
+				" cannot map with flags %lx\n", __func__,
+				buffer->flags, flags);
+			ret = -EEXIST;
+			mutex_unlock(&buffer->lock);
+			goto err1;
+		}
+
+	} else {
+		buffer->flags = flags;
+	}
 	/* now map it to userspace */
-	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma,
+						flags);
+	buffer->umap_cnt++;
 	mutex_unlock(&buffer->lock);
 	if (ret) {
 		pr_err("%s: failure mapping buffer to userspace\n",
 		       __func__);
-		goto err1;
+		goto err2;
 	}
 
 	vma->vm_ops = &ion_vm_ops;
@@ -895,8 +951,12 @@
 		 atomic_read(&buffer->ref.refcount));
 	return 0;
 
-err1:
+err2:
+	/* umap_cnt is protected by buffer->lock everywhere else
+	 * (ion_vm_close, the map-count checks); take it here too */
+	mutex_lock(&buffer->lock);
+	buffer->umap_cnt--;
+	mutex_unlock(&buffer->lock);
 	/* drop the reference to the handle */
+err1:
 	ion_handle_put(handle);
 err:
 	/* drop the reference to the client */
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 4949677..a50e697 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -97,10 +97,13 @@
 }
 
 void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
-				   struct ion_buffer *buffer)
+				   struct ion_buffer *buffer,
+				   unsigned long flags)
 {
-	return __arch_ioremap(buffer->priv_phys, buffer->size,
-			      MT_MEMORY_NONCACHED);
+	if (flags & ION_SET_CACHE(CACHED))
+		return ioremap_cached(buffer->priv_phys, buffer->size);
+	else
+		return ioremap(buffer->priv_phys, buffer->size);
 }
 
 void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
@@ -112,12 +115,18 @@
 }
 
 int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-			       struct vm_area_struct *vma)
+			       struct vm_area_struct *vma, unsigned long flags)
 {
-	return remap_pfn_range(vma, vma->vm_start,
+	if (flags & ION_SET_CACHE(CACHED))
+		return remap_pfn_range(vma, vma->vm_start,
 			       __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
 			       buffer->size,
-			       pgprot_noncached(vma->vm_page_prot));
+			       vma->vm_page_prot);
+	else
+		return remap_pfn_range(vma, vma->vm_start,
+			       __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
+					buffer->size,
+					pgprot_noncached(vma->vm_page_prot));
 }
 
 static struct ion_heap_ops carveout_heap_ops = {
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 3323954..581abe5 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -71,6 +71,7 @@
 	void *vaddr;
 	int dmap_cnt;
 	struct scatterlist *sglist;
+	int umap_cnt;
 };
 
 /**
@@ -95,10 +96,11 @@
 	struct scatterlist *(*map_dma) (struct ion_heap *heap,
 					struct ion_buffer *buffer);
 	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
-	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer,
+				unsigned long flags);
 	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
 	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
-			 struct vm_area_struct *vma);
+			 struct vm_area_struct *vma, unsigned long flags);
 };
 
 /**
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index c046cf1..b34b455 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -75,9 +75,15 @@
 }
 
 void *ion_system_heap_map_kernel(struct ion_heap *heap,
-				 struct ion_buffer *buffer)
+				 struct ion_buffer *buffer,
+				 unsigned long flags)
 {
-	return buffer->priv_virt;
+	if (flags & ION_SET_CACHE(CACHED))
+		return buffer->priv_virt;
+	else {
+		pr_err("%s: cannot map system heap uncached\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
 }
 
 void ion_system_heap_unmap_kernel(struct ion_heap *heap,
@@ -86,9 +92,15 @@
 }
 
 int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-			     struct vm_area_struct *vma)
+			     struct vm_area_struct *vma, unsigned long flags)
 {
-	return remap_vmalloc_range(vma, buffer->priv_virt, vma->vm_pgoff);
+	if (flags & ION_SET_CACHE(CACHED))
+		return remap_vmalloc_range(vma, buffer->priv_virt,
+						vma->vm_pgoff);
+	else {
+		pr_err("%s: cannot map system heap uncached\n", __func__);
+		return -EINVAL;
+	}
 }
 
 static struct ion_heap_ops vmalloc_ops = {
@@ -159,13 +171,19 @@
 
 int ion_system_contig_heap_map_user(struct ion_heap *heap,
 				    struct ion_buffer *buffer,
-				    struct vm_area_struct *vma)
+				    struct vm_area_struct *vma,
+				    unsigned long flags)
 {
 	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
-	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+
+	if (flags & ION_SET_CACHE(CACHED))
+		return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
 			       vma->vm_end - vma->vm_start,
 			       vma->vm_page_prot);
-
+	else {
+		pr_err("%s: cannot map system heap uncached\n", __func__);
+		return -EINVAL;
+	}
 }
 
 static struct ion_heap_ops kmalloc_ops = {