msm: kgsl: Prevent race conditions when freeing memory
Multiple threads can call an ioctl to free the same memory region.
Only one of these threads may be allowed to actually free the memory;
the rest must return without freeing. Add a new pending flag that is
tested and set in a critical section guarded by a spinlock, so that
multiple ioctl threads cannot free the same memory.
Also, a thread could free a memory region that is still in use by
another thread. Ensure that detaching the memory from the process
list and freeing the memory always happen in the same thread. This
prevents a situation where memory that has been detached from the
process list, but not yet freed, is still in use by another thread.
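Concretely, the detach now happens inside kgsl_mem_entry_destroy(),
which only runs once the last reference is dropped, so the freeing
thread is also the detaching thread (a rough sketch of the new flow):

    /* in kgsl_mem_entry_destroy(), reached only at refcount zero */
    kgsl_mem_entry_detach_process(entry);  /* idr and rb-tree removal */
    /* ... followed by release of the backing memory, same thread */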
Separate the selection of a gpuaddr from the mapping into the
pagetable. The gpuaddr assignment needs to be done in a critical
section with the process memory lock held, but the mapping into the
pagetable can be done separately without holding the memory lock.
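The attach path therefore becomes two phases; condensed from
kgsl_mem_entry_attach_process() in the hunks below:

    spin_lock(&process->mem_lock);
    ret = kgsl_mem_entry_track_gpuaddr(process, entry); /* pick gpuaddr */
    spin_unlock(&process->mem_lock);
    if (!ret && entry->memdesc.gpuaddr)
        /* pagetable mapping is done without the memory lock held */
        ret = kgsl_mmu_map(process->pagetable, &entry->memdesc);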
CRs-Fixed: 495144
Change-Id: Idf85fbd4bca29c18597f4b0e737c207f002ab266
Signed-off-by: Shubhraprakash Das <sadas@codeaurora.org>
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 6336a85..ce4eb67 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -53,6 +53,8 @@
static struct ion_client *kgsl_ion_client;
+static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry);
+
/**
* kgsl_trace_issueibcmds() - Call trace_issueibcmds by proxy
* device: KGSL device
@@ -193,6 +195,9 @@
struct kgsl_mem_entry,
refcount);
+ /* Detach from process list */
+ kgsl_mem_entry_detach_process(entry);
+
if (entry->memtype != KGSL_MEM_ENTRY_KERNEL)
kgsl_driver.stats.mapped -= entry->memdesc.size;
@@ -224,23 +229,46 @@
EXPORT_SYMBOL(kgsl_mem_entry_destroy);
/**
- * kgsl_mem_entry_track_gpuaddr - Insert a mem_entry in the address tree
+ * kgsl_mem_entry_track_gpuaddr - Assign a mem_entry a gpu address and
+ * insert it into the address tree
* @process: the process that owns the memory
* @entry: the memory entry
*
- * Insert a kgsl_mem_entry in to the rb_tree for searching by GPU address.
- * Not all mem_entries will have gpu addresses when first created, so this
- * function may be called after creation when the GPU address is finally
- * assigned.
+ * @returns - 0 on success else error code
+ *
+ * Insert the kgsl_mem_entry into the rb_tree for searching by GPU address.
+ * The assignment of the gpu address and the insertion into the tree must
+ * happen with the memory lock held, to avoid a race between the gpu
+ * address being selected and another thread searching the rb tree for
+ * memory by gpuaddr.
+ * This function must be called with the process's memory spinlock held.
*/
-static void
+static int
kgsl_mem_entry_track_gpuaddr(struct kgsl_process_private *process,
struct kgsl_mem_entry *entry)
{
+ int ret = 0;
struct rb_node **node;
struct rb_node *parent = NULL;
- spin_lock(&process->mem_lock);
+ assert_spin_locked(&process->mem_lock);
+ /*
+	 * If the CPU=GPU map is used then the caller needs to set
+	 * the gpu address
+ */
+ if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
+ if (!entry->memdesc.gpuaddr)
+ goto done;
+ } else if (entry->memdesc.gpuaddr) {
+ WARN_ONCE(1, "gpuaddr assigned w/o holding memory lock\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ if (!kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
+ ret = kgsl_mmu_get_gpuaddr(process->pagetable, &entry->memdesc);
+ if (ret)
+ goto done;
+ }
node = &process->mem_rb.rb_node;
@@ -259,7 +287,27 @@
rb_link_node(&entry->node, parent, node);
rb_insert_color(&entry->node, &process->mem_rb);
- spin_unlock(&process->mem_lock);
+done:
+ return ret;
+}
+
+/**
+ * kgsl_mem_entry_untrack_gpuaddr() - Untrack memory that was previously tracked
+ * process - Pointer to the process private to which the memory belongs
+ * entry - Memory entry to untrack
+ *
+ * Does the opposite of kgsl_mem_entry_track_gpuaddr. Must be called
+ * with the process's memory spinlock held.
+ */
+static void
+kgsl_mem_entry_untrack_gpuaddr(struct kgsl_process_private *process,
+ struct kgsl_mem_entry *entry)
+{
+ assert_spin_locked(&process->mem_lock);
+ if (entry->memdesc.gpuaddr) {
+ kgsl_mmu_put_gpuaddr(process->pagetable, &entry->memdesc);
+ rb_erase(&entry->node, &entry->priv->mem_rb);
+ }
}
/**
@@ -298,8 +346,19 @@
}
entry->priv = process;
- if (entry->memdesc.gpuaddr != 0)
- kgsl_mem_entry_track_gpuaddr(process, entry);
+ spin_lock(&process->mem_lock);
+ ret = kgsl_mem_entry_track_gpuaddr(process, entry);
+ if (ret)
+ idr_remove(&process->mem_idr, entry->id);
+ spin_unlock(&process->mem_lock);
+ if (ret)
+ goto err;
+ /* map the memory after unlocking if gpuaddr has been assigned */
+ if (entry->memdesc.gpuaddr) {
+ ret = kgsl_mmu_map(process->pagetable, &entry->memdesc);
+ if (ret)
+ kgsl_mem_entry_detach_process(entry);
+ }
err:
return ret;
}
@@ -308,37 +367,23 @@
static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
{
- bool had_gpuaddr = false;
-
if (entry == NULL)
return;
- /*
- * Unmap the entry first so that there isn't a period of
- * time where kgsl doesn't know about the address range
- * but it is still present in the pagetable. Unmapping will
- * clear the gpuaddr field, so remember if we had a mapping,
- * and an rbtree entry for later.
- */
- had_gpuaddr = entry->memdesc.gpuaddr != 0;
- kgsl_mmu_unmap(entry->memdesc.pagetable, &entry->memdesc);
+ /* Unmap here so that below we can call kgsl_mmu_put_gpuaddr */
+ kgsl_mmu_unmap(entry->priv->pagetable, &entry->memdesc);
spin_lock(&entry->priv->mem_lock);
+ kgsl_mem_entry_untrack_gpuaddr(entry->priv, entry);
if (entry->id != 0)
idr_remove(&entry->priv->mem_idr, entry->id);
entry->id = 0;
- if (had_gpuaddr)
- rb_erase(&entry->node, &entry->priv->mem_rb);
-
+ entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;
spin_unlock(&entry->priv->mem_lock);
- entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;
entry->priv = NULL;
-
-
- kgsl_mem_entry_put(entry);
}
/* Allocate a new context id */
@@ -736,7 +781,7 @@
rcu_read_unlock();
if (entry == NULL)
break;
- kgsl_mem_entry_detach_process(entry);
+ kgsl_mem_entry_put(entry);
/*
* Always start back at the beginning, to
* ensure all entries are removed,
@@ -1073,9 +1118,10 @@
* @size: length of the region.
*
* Checks that there are no existing allocations within an address
- * region.
+ * region. This function must be called with the process's memory
+ * spinlock held.
*/
-int
+static int
kgsl_sharedmem_region_empty(struct kgsl_process_private *private,
unsigned int gpuaddr, size_t size)
{
@@ -1084,6 +1130,8 @@
struct rb_node *node = private->mem_rb.rb_node;
+ assert_spin_locked(&private->mem_lock);
+
if (!kgsl_mmu_gpuaddr_in_range(gpuaddr))
return 0;
@@ -1091,7 +1139,6 @@
if (gpuaddr_end < gpuaddr)
return 0;
- spin_lock(&private->mem_lock);
node = private->mem_rb.rb_node;
while (node != NULL) {
struct kgsl_mem_entry *entry;
@@ -1112,7 +1159,6 @@
break;
}
}
- spin_unlock(&private->mem_lock);
return result;
}
@@ -1140,6 +1186,30 @@
return entry;
}
+/**
+ * kgsl_mem_entry_set_pend() - Set the pending free flag of a memory entry
+ * @entry - The memory entry
+ *
+ * @returns - true if the pending flag was previously unset, else false
+ *
+ * This function sets the pending free flag if it was previously unset. Used
+ * to prevent a race between ioctls calling free/freememontimestamp on the
+ * same entry. Whichever thread sets the flag first will do the free.
+ */
+static inline bool kgsl_mem_entry_set_pend(struct kgsl_mem_entry *entry)
+{
+	bool ret = false;
+
+	/* Check for NULL before taking the lock, which dereferences entry */
+	if (entry == NULL)
+		return false;
+
+	spin_lock(&entry->priv->mem_lock);
+	if (!entry->pending_free) {
+		entry->pending_free = 1;
+		ret = true;
+	}
+	spin_unlock(&entry->priv->mem_lock);
+	return ret;
+}
+
/*call all ioctl sub functions with driver locked*/
static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
@@ -1416,7 +1486,7 @@
/* Free the memory for all event types */
trace_kgsl_mem_timestamp_free(device, entry, id, timestamp, 0);
- kgsl_mem_entry_detach_process(entry);
+ kgsl_mem_entry_put(entry);
}
static long _cmdstream_freememontimestamp(struct kgsl_device_private *dev_priv,
@@ -1435,6 +1505,12 @@
"invalid gpuaddr %08x\n", gpuaddr);
return -EINVAL;
}
+ if (!kgsl_mem_entry_set_pend(entry)) {
+ KGSL_DRV_WARN(dev_priv->device,
+ "Cannot set pending bit for gpuaddr %08x\n", gpuaddr);
+ kgsl_mem_entry_put(entry);
+ return -EBUSY;
+ }
trace_kgsl_mem_timestamp_queue(device, entry, context_id,
kgsl_readtimestamp(device, context,
KGSL_TIMESTAMP_RETIRED),
@@ -1534,6 +1610,11 @@
return -EINVAL;
}
+ if (!kgsl_mem_entry_set_pend(entry)) {
+ kgsl_mem_entry_put(entry);
+ return -EBUSY;
+ }
+
trace_kgsl_mem_free(entry);
kgsl_memfree_hist_set_event(entry->priv->pid,
@@ -1541,7 +1622,12 @@
entry->memdesc.size,
entry->memdesc.flags);
- kgsl_mem_entry_detach_process(entry);
+ /*
+	 * The first kgsl_mem_entry_put drops the reference that we took in
+	 * this function when calling kgsl_sharedmem_find; the second one
+	 * frees the memory, since this is a free ioctl
+ */
+ kgsl_mem_entry_put(entry);
kgsl_mem_entry_put(entry);
return 0;
}
@@ -1559,9 +1645,20 @@
KGSL_MEM_INFO(dev_priv->device, "invalid id %d\n", param->id);
return -EINVAL;
}
+
+ if (!kgsl_mem_entry_set_pend(entry)) {
+ kgsl_mem_entry_put(entry);
+ return -EBUSY;
+ }
+
trace_kgsl_mem_free(entry);
- kgsl_mem_entry_detach_process(entry);
+ /*
+	 * The first kgsl_mem_entry_put drops the reference that we took in
+	 * this function when calling kgsl_sharedmem_find_id; the second one
+	 * frees the memory, since this is a free ioctl
+ */
+ kgsl_mem_entry_put(entry);
kgsl_mem_entry_put(entry);
return 0;
}
@@ -2027,18 +2124,15 @@
else if (entry->memdesc.size >= SZ_64K)
kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_64));
- result = kgsl_mmu_map(private->pagetable, &entry->memdesc);
- if (result)
- goto error_put_file_ptr;
-
- /* Adjust the returned value for a non 4k aligned offset */
- param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
/* echo back flags */
param->flags = entry->memdesc.flags;
result = kgsl_mem_entry_attach_process(entry, private);
if (result)
- goto error_unmap;
+ goto error_attach;
+
+ /* Adjust the returned value for a non 4k aligned offset */
+ param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
kgsl_driver.stats.mapped_max);
@@ -2049,9 +2143,7 @@
return result;
-error_unmap:
- kgsl_mmu_unmap(private->pagetable, &entry->memdesc);
-error_put_file_ptr:
+error_attach:
switch (entry->memtype) {
case KGSL_MEM_ENTRY_PMEM:
case KGSL_MEM_ENTRY_ASHMEM:
@@ -2154,7 +2246,6 @@
param->gpuaddr);
return -EINVAL;
}
-
ret = _kgsl_gpumem_sync_cache(entry, KGSL_GPUMEM_CACHE_FLUSH);
kgsl_mem_entry_put(entry);
return ret;
@@ -2218,10 +2309,6 @@
if (result)
return result;
- result = kgsl_mmu_map(private->pagetable, &entry->memdesc);
- if (result)
- goto err;
-
result = kgsl_mem_entry_attach_process(entry, private);
if (result != 0)
goto err;
@@ -2255,12 +2342,6 @@
if (result != 0)
goto err;
- if (!kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
- result = kgsl_mmu_map(private->pagetable, &entry->memdesc);
- if (result)
- goto err;
- }
-
result = kgsl_mem_entry_attach_process(entry, private);
if (result != 0)
goto err;
@@ -2838,8 +2919,30 @@
ret = ALIGN(ret, (1 << align));
/*make sure there isn't a GPU only mapping at this address */
- if (kgsl_sharedmem_region_empty(private, ret, orig_len))
+ spin_lock(&private->mem_lock);
+ if (kgsl_sharedmem_region_empty(private, ret, orig_len)) {
+ int ret_val;
+ /*
+ * We found a free memory map, claim it here with
+ * memory lock held
+ */
+ entry->memdesc.gpuaddr = ret;
+ /* This should never fail */
+ ret_val = kgsl_mem_entry_track_gpuaddr(private, entry);
+ spin_unlock(&private->mem_lock);
+ BUG_ON(ret_val);
+ /* map cannot be called with lock held */
+ ret_val = kgsl_mmu_map(private->pagetable,
+ &entry->memdesc);
+ if (ret_val) {
+ spin_lock(&private->mem_lock);
+ kgsl_mem_entry_untrack_gpuaddr(private, entry);
+ spin_unlock(&private->mem_lock);
+ ret = ret_val;
+ }
break;
+ }
+ spin_unlock(&private->mem_lock);
trace_kgsl_mem_unmapped_area_collision(entry, addr, orig_len,
ret);
@@ -2896,17 +2999,6 @@
if (ret)
return ret;
- if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
- entry->memdesc.gpuaddr = vma->vm_start;
-
- ret = kgsl_mmu_map(private->pagetable, &entry->memdesc);
- if (ret) {
- kgsl_mem_entry_put(entry);
- return ret;
- }
- kgsl_mem_entry_track_gpuaddr(private, entry);
- }
-
vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
vma->vm_private_data = entry;
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index a7a9104..b2d7cf2 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -151,6 +151,8 @@
#define KGSL_MEMDESC_GLOBAL BIT(1)
/* The memdesc is frozen during a snapshot */
#define KGSL_MEMDESC_FROZEN BIT(2)
+/* The memdesc is mapped into a pagetable */
+#define KGSL_MEMDESC_MAPPED BIT(3)
/* shared memory allocation */
struct kgsl_memdesc {
@@ -188,6 +190,8 @@
/* back pointer to private structure under whose context this
* allocation is made */
struct kgsl_process_private *priv;
+ /* Initialized to 0, set to 1 when entry is marked for freeing */
+ int pending_free;
};
#ifdef CONFIG_MSM_KGSL_MMU_PAGE_FAULT
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 61bbc3a..7ac66cf 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -607,15 +607,20 @@
}
EXPORT_SYMBOL(kgsl_mh_start);
+/**
+ * kgsl_mmu_get_gpuaddr - Assign a memdesc a gpuaddr from the gen pool
+ * @pagetable - pagetable whose pool is to be used
+ * @memdesc - memdesc to which gpuaddr is assigned
+ *
+ * returns - 0 on success else error code
+ */
int
-kgsl_mmu_map(struct kgsl_pagetable *pagetable,
- struct kgsl_memdesc *memdesc)
+kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc)
{
- int ret;
struct gen_pool *pool = NULL;
int size;
int page_align = ilog2(PAGE_SIZE);
- unsigned int protflags = kgsl_memdesc_protflags(memdesc);
if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
if (memdesc->sglen == 1) {
@@ -687,6 +692,28 @@
return -ENOMEM;
}
}
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_get_gpuaddr);
+
+int
+kgsl_mmu_map(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc)
+{
+ int ret = 0;
+ int size;
+ unsigned int protflags = kgsl_memdesc_protflags(memdesc);
+
+ if (!memdesc->gpuaddr)
+ return -EINVAL;
+ /* Only global mappings should be mapped multiple times */
+ if (!kgsl_memdesc_is_global(memdesc) &&
+ (KGSL_MEMDESC_MAPPED & memdesc->priv))
+ return -EINVAL;
+ /* Add space for the guard page when allocating the mmu VA. */
+ size = memdesc->size;
+ if (kgsl_memdesc_has_guard_page(memdesc))
+ size += PAGE_SIZE;
if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
spin_lock(&pagetable->lock);
@@ -696,7 +723,7 @@
spin_lock(&pagetable->lock);
if (ret)
- goto err_free_gpuaddr;
+ goto done;
/* Keep track of the statistics for the sysfs files */
@@ -707,34 +734,76 @@
pagetable->stats.max_mapped);
spin_unlock(&pagetable->lock);
+ memdesc->priv |= KGSL_MEMDESC_MAPPED;
return 0;
-err_free_gpuaddr:
+done:
spin_unlock(&pagetable->lock);
- if (pool)
- gen_pool_free(pool, memdesc->gpuaddr, size);
- memdesc->gpuaddr = 0;
return ret;
}
EXPORT_SYMBOL(kgsl_mmu_map);
+/**
+ * kgsl_mmu_put_gpuaddr - Free a gpuaddr back to the memory pool
+ * @pagetable - pagetable whose pool the gpuaddr is freed back to
+ * @memdesc - memdesc whose gpuaddr is freed
+ *
+ * returns - 0 on success else error code
+ */
+int
+kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc)
+{
+ struct gen_pool *pool;
+ int size;
+
+ if (memdesc->size == 0 || memdesc->gpuaddr == 0)
+ return 0;
+
+ if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
+ goto done;
+
+ /* Add space for the guard page when freeing the mmu VA. */
+ size = memdesc->size;
+ if (kgsl_memdesc_has_guard_page(memdesc))
+ size += PAGE_SIZE;
+
+ pool = pagetable->pool;
+
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+ if (kgsl_memdesc_is_global(memdesc))
+ pool = pagetable->kgsl_pool;
+ else if (kgsl_memdesc_use_cpu_map(memdesc))
+ pool = NULL;
+ }
+ if (pool)
+ gen_pool_free(pool, memdesc->gpuaddr, size);
+ /*
+ * Don't clear the gpuaddr on global mappings because they
+ * may be in use by other pagetables
+ */
+done:
+ if (!kgsl_memdesc_is_global(memdesc))
+ memdesc->gpuaddr = 0;
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_put_gpuaddr);
+
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc)
{
- struct gen_pool *pool;
int size;
unsigned int start_addr = 0;
unsigned int end_addr = 0;
- if (memdesc->size == 0 || memdesc->gpuaddr == 0)
- return 0;
+ if (memdesc->size == 0 || memdesc->gpuaddr == 0 ||
+ !(KGSL_MEMDESC_MAPPED & memdesc->priv))
+ return -EINVAL;
- if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
- memdesc->gpuaddr = 0;
+ if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
return 0;
- }
/* Add space for the guard page when freeing the mmu VA. */
size = memdesc->size;
@@ -761,24 +830,8 @@
pagetable->stats.mapped -= size;
spin_unlock(&pagetable->lock);
-
- pool = pagetable->pool;
-
- if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
- if (kgsl_memdesc_is_global(memdesc))
- pool = pagetable->kgsl_pool;
- else if (kgsl_memdesc_use_cpu_map(memdesc))
- pool = NULL;
- }
- if (pool)
- gen_pool_free(pool, memdesc->gpuaddr, size);
-
- /*
- * Don't clear the gpuaddr on global mappings because they
- * may be in use by other pagetables
- */
if (!kgsl_memdesc_is_global(memdesc))
- memdesc->gpuaddr = 0;
+ memdesc->priv &= ~KGSL_MEMDESC_MAPPED;
return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
@@ -799,9 +852,12 @@
gpuaddr = memdesc->gpuaddr;
memdesc->priv |= KGSL_MEMDESC_GLOBAL;
- result = kgsl_mmu_map(pagetable, memdesc);
+ result = kgsl_mmu_get_gpuaddr(pagetable, memdesc);
if (result)
goto error;
+ result = kgsl_mmu_map(pagetable, memdesc);
+ if (result)
+ goto error_put_gpuaddr;
/*global mappings must have the same gpu address in all pagetables*/
if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
@@ -813,6 +869,8 @@
return result;
error_unmap:
kgsl_mmu_unmap(pagetable, memdesc);
+error_put_gpuaddr:
+ kgsl_mmu_put_gpuaddr(pagetable, memdesc);
error:
return result;
}
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 23aae1c..a1e1835 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -205,10 +205,14 @@
int kgsl_mmu_close(struct kgsl_device *device);
int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
+int kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc);
int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
+int kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc);
unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
uint32_t flags);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 0bc66c2..34f89e7 100755
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -752,8 +752,10 @@
if (memdesc == NULL || memdesc->size == 0)
return;
- if (memdesc->gpuaddr)
+ if (memdesc->gpuaddr) {
kgsl_mmu_unmap(memdesc->pagetable, memdesc);
+ kgsl_mmu_put_gpuaddr(memdesc->pagetable, memdesc);
+ }
if (memdesc->ops && memdesc->ops->free)
memdesc->ops->free(memdesc);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index c000cbb..b08c6d6 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -263,6 +263,11 @@
ret = kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
if (ret)
return ret;
+ ret = kgsl_mmu_get_gpuaddr(pagetable, memdesc);
+ if (ret) {
+ kgsl_sharedmem_free(memdesc);
+ return ret;
+ }
ret = kgsl_mmu_map(pagetable, memdesc);
if (ret)
kgsl_sharedmem_free(memdesc);