gpu: ion: Decrement mapping count when mapping fails

Decrement the mapping count and make the corresponding SMI release
region call when a kernel or user mapping fails, so that a failed
mapping does not leave the SMI region held. Factor the duplicated
request/release logic into ion_carveout_request_region() and
ion_carveout_release_region().
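
For illustration, a minimal stand-alone model of the pairing the new
helpers enforce. This is plain C, not kernel code: map_count,
request_region() and release_region() are made-up stand-ins for the
carveout heap fields, and a plain int replaces the driver's atomic_t
to keep the sketch short and single-threaded:

	#include <stdio.h>

	static int map_count;

	static void request_region(void) { printf("SMI region requested\n"); }
	static void release_region(void) { printf("SMI region released\n"); }

	static int map(int fail)
	{
		/* First mapper brings up the region. */
		if (++map_count == 1)
			request_region();
		if (fail) {
			/* The fix: a failed map gives back its count;
			 * the last count to go releases the region. */
			if (--map_count == 0)
				release_region();
			return -1;
		}
		return 0;
	}

	static void unmap(void)
	{
		/* Last unmapper tears down the region. */
		if (--map_count == 0)
			release_region();
	}

	int main(void)
	{
		map(0);  /* count 1, region requested */
		map(1);  /* fails: count back to 1, region stays held */
		unmap(); /* count 0, region released */
		return 0;
	}

Before this change, a failing ioremap()/remap_pfn_range() returned
without the decrement, leaving map_count permanently elevated and the
SMI region held.
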
Change-Id: I81fb7eeee9973c770a65f02236c5358ce313e3a0
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>

diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 1c35f5c..10daf91 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -131,28 +131,55 @@
vfree(buffer->sglist);
}

+static int ion_carveout_request_region(struct ion_carveout_heap *carveout_heap)
+{
+ int ret_value = 0;
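+
+	/* Only the first mapping needs to bring up the SMI region. */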
+ if (atomic_inc_return(&carveout_heap->map_count) == 1) {
+ if (carveout_heap->request_region) {
+ ret_value = carveout_heap->request_region(
+ carveout_heap->bus_id);
+ if (ret_value) {
+				pr_err("Unable to request SMI region\n");
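+				/* Undo the count so the next mapper retries. */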
+ atomic_dec(&carveout_heap->map_count);
+ }
+ }
+ }
+ return ret_value;
+}
+
+static int ion_carveout_release_region(struct ion_carveout_heap *carveout_heap)
+{
+ int ret_value = 0;
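+
+	/* The last mapping to go away releases the SMI region. */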
+ if (atomic_dec_and_test(&carveout_heap->map_count)) {
+ if (carveout_heap->release_region) {
+ ret_value = carveout_heap->release_region(
+ carveout_heap->bus_id);
+ if (ret_value)
+				pr_err("Unable to release SMI region\n");
+ }
+ }
+ return ret_value;
+}
+
void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long flags)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
+ void *ret_value;
- if (atomic_inc_return(&carveout_heap->map_count) == 1) {
- if (carveout_heap->request_region) {
- int ret = carveout_heap->request_region(
- carveout_heap->bus_id);
- if (ret) {
- pr_err("Unable to request SMI region");
- atomic_dec(&carveout_heap->map_count);
- return NULL;
- }
- }
- }
+ if (ion_carveout_request_region(carveout_heap))
+ return NULL;
+
if (ION_IS_CACHED(flags))
- return ioremap_cached(buffer->priv_phys, buffer->size);
+ ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
else
- return ioremap(buffer->priv_phys, buffer->size);
+ ret_value = ioremap(buffer->priv_phys, buffer->size);
+
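+	/* ioremap failed: drop the mapping count taken above. */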
+ if (!ret_value)
+ ion_carveout_release_region(carveout_heap);
+ return ret_value;
}

void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
@@ -164,15 +191,7 @@
__arch_iounmap(buffer->vaddr);
buffer->vaddr = NULL;
- if (atomic_dec_and_test(&carveout_heap->map_count)) {
- if (carveout_heap->release_region) {
- int ret = carveout_heap->release_region(
- carveout_heap->bus_id);
- if (ret)
- pr_err("Unable to release SMI region");
- }
- }
-
+ ion_carveout_release_region(carveout_heap);
return;
}
@@ -181,29 +200,25 @@
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
+ int ret_value = 0;
- if (atomic_inc_return(&carveout_heap->map_count) == 1) {
- if (carveout_heap->request_region) {
- int ret = carveout_heap->request_region(
- carveout_heap->bus_id);
- if (ret) {
- pr_err("Unable to request SMI region");
- atomic_dec(&carveout_heap->map_count);
- return -EINVAL;
- }
- }
- }
+ if (ion_carveout_request_region(carveout_heap))
+ return -EINVAL;
if (ION_IS_CACHED(flags))
- return remap_pfn_range(vma, vma->vm_start,
+ ret_value = remap_pfn_range(vma, vma->vm_start,
__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
else
- return remap_pfn_range(vma, vma->vm_start,
+ ret_value = remap_pfn_range(vma, vma->vm_start,
__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
vma->vm_end - vma->vm_start,
pgprot_noncached(vma->vm_page_prot));
+
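+	/* remap_pfn_range failed: drop the mapping count taken above. */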
+ if (ret_value)
+ ion_carveout_release_region(carveout_heap);
+ return ret_value;
}

void ion_carveout_heap_unmap_user(struct ion_heap *heap,
@@ -211,15 +226,7 @@
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
-
- if (atomic_dec_and_test(&carveout_heap->map_count)) {
- if (carveout_heap->release_region) {
- int ret = carveout_heap->release_region(
- carveout_heap->bus_id);
- if (ret)
- pr_err("Unable to release SMI region");
- }
- }
+ ion_carveout_release_region(carveout_heap);
}

int ion_carveout_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,