msm: kgsl: Create scatter gather lists for memory objects
Create and use scatter-gather lists for memory objects. This avoids
having to dynamically compute the physical address of each backing
page for MMU mapping and cache operations.
Change-Id: Ic0dedbad9b973ecce4ae773b6bd682ba01010e5b
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
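
For reference, the scatterlist pattern applied throughout this patch is
roughly the following (a minimal sketch, not part of the patch;
build_and_walk_sg and the pages array are placeholder names):

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static int build_and_walk_sg(struct page **pages, int sglen)
    {
            struct scatterlist *sg, *s;
            int i;

            /* One sg entry per backing page */
            sg = kmalloc(sglen * sizeof(*sg), GFP_KERNEL);
            if (sg == NULL)
                    return -ENOMEM;

            sg_init_table(sg, sglen);
            for (i = 0; i < sglen; i++)
                    sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

            /* Consumers (MMU map, cache ops) walk the finished list */
            for_each_sg(sg, s, sglen, i) {
                    unsigned int paddr = sg_phys(s);
                    pr_debug("entry %d: paddr %x len %u\n",
                             i, paddr, s->length);
            }

            kfree(sg);
            return 0;
    }

memdesc_sg_virt() and kgsl_sharedmem_vmalloc() below follow this shape;
memdesc_sg_phys() covers the single-entry case for physically
contiguous memory.
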
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 165bbbf..7e61a32 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1226,7 +1226,11 @@
entry->memdesc.size = size;
entry->memdesc.physaddr = phys + (offset & PAGE_MASK);
entry->memdesc.hostptr = (void *) (virt + (offset & PAGE_MASK));
- entry->memdesc.ops = &kgsl_contiguous_ops;
+
+ ret = memdesc_sg_phys(&entry->memdesc,
+ phys + (offset & PAGE_MASK), size);
+ if (ret)
+ goto err;
return 0;
err:
@@ -1236,6 +1240,60 @@
return ret;
}
+static int memdesc_sg_virt(struct kgsl_memdesc *memdesc,
+ void *addr, int size)
+{
+ int i;
+ int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
+ unsigned long paddr = (unsigned long) addr;
+
+ memdesc->sg = kmalloc(sglen * sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (memdesc->sg == NULL)
+ return -ENOMEM;
+
+ memdesc->sglen = sglen;
+ sg_init_table(memdesc->sg, sglen);
+
+ spin_lock(&current->mm->page_table_lock);
+
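+ /* Walk the process page tables (pgd -> pmd -> pte) to find the
+  * struct page backing each PAGE_SIZE chunk of the mapping */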
+ for (i = 0; i < sglen; i++, paddr += PAGE_SIZE) {
+ struct page *page;
+ pmd_t *ppmd;
+ pte_t *ppte;
+ pgd_t *ppgd = pgd_offset(current->mm, paddr);
+
+ if (pgd_none(*ppgd) || pgd_bad(*ppgd))
+ goto err;
+
+ ppmd = pmd_offset(ppgd, paddr);
+ if (pmd_none(*ppmd) || pmd_bad(*ppmd))
+ goto err;
+
+ ppte = pte_offset_map(ppmd, paddr);
+ if (ppte == NULL)
+ goto err;
+
+ page = pfn_to_page(pte_pfn(*ppte));
+ if (!page)
+ goto err;
+
+ sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
+ pte_unmap(ppte);
+ }
+
+ spin_unlock(&current->mm->page_table_lock);
+
+ return 0;
+
+err:
+ spin_unlock(&current->mm->page_table_lock);
+ kfree(memdesc->sg);
+ memdesc->sg = NULL;
+
+ return -EINVAL;
+}
+
static int kgsl_setup_hostptr(struct kgsl_mem_entry *entry,
struct kgsl_pagetable *pagetable,
void *hostptr, unsigned int offset,
@@ -1285,9 +1343,9 @@
entry->memdesc.pagetable = pagetable;
entry->memdesc.size = size;
entry->memdesc.hostptr = hostptr + (offset & PAGE_MASK);
- entry->memdesc.ops = &kgsl_userptr_ops;
- return 0;
+ return memdesc_sg_virt(&entry->memdesc,
+ hostptr + (offset & PAGE_MASK), size);
}
#ifdef CONFIG_ASHMEM
@@ -1335,11 +1393,13 @@
}
entry->file_ptr = filep;
-
entry->memdesc.pagetable = pagetable;
entry->memdesc.size = ALIGN(size, PAGE_SIZE);
entry->memdesc.hostptr = hostptr;
- entry->memdesc.ops = &kgsl_userptr_ops;
+
+ ret = memdesc_sg_virt(&entry->memdesc, hostptr, size);
+ if (ret)
+ goto err;
return 0;
@@ -1725,7 +1785,7 @@
{
struct kgsl_mem_entry *entry = vma->vm_private_data;
- if (!entry->memdesc.ops->vmfault)
+ if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
return VM_FAULT_SIGBUS;
return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
@@ -1772,7 +1832,9 @@
if (entry == NULL)
return -EINVAL;
- if (!entry->memdesc.ops->vmflags || !entry->memdesc.ops->vmfault)
+ if (!entry->memdesc.ops ||
+ !entry->memdesc.ops->vmflags ||
+ !entry->memdesc.ops->vmfault)
return -EINVAL;
vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 8db2cb4..1480df4 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -115,6 +115,8 @@
unsigned int physaddr;
unsigned int size;
unsigned int priv;
+ struct scatterlist *sg;
+ unsigned int sglen;
struct kgsl_memdesc_ops *ops;
};
diff --git a/drivers/gpu/msm/kgsl_drm.c b/drivers/gpu/msm/kgsl_drm.c
index 202783b..cdf9dc4 100644
--- a/drivers/gpu/msm/kgsl_drm.c
+++ b/drivers/gpu/msm/kgsl_drm.c
@@ -293,7 +293,6 @@
}
priv->memdesc.size = obj->size * priv->bufcount;
- priv->memdesc.ops = &kgsl_contiguous_ops;
} else if (TYPE_IS_MEM(priv->type)) {
priv->memdesc.hostptr =
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 383b910..fe5677e 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -659,68 +659,45 @@
return 0;
}
+#define SUPERPTE_IS_DIRTY(_p) \
+(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \
+GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))
+
static int
kgsl_gpummu_map(void *mmu_specific_pt,
struct kgsl_memdesc *memdesc,
unsigned int protflags)
{
- int numpages;
- unsigned int pte, ptefirst, ptelast, physaddr;
- int flushtlb;
- unsigned int offset = 0;
+ unsigned int pte;
struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
+ struct scatterlist *s;
+ int flushtlb = 0;
+ int i;
- if (!protflags ||
- protflags & ~(GSL_PT_PAGE_RV | GSL_PT_PAGE_WV)) {
- KGSL_CORE_ERR("Invalid protflags for "
- "kgsl_mmu_specific_map: %x", protflags);
- return -EINVAL;
- }
+ pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);
- numpages = (memdesc->size >> PAGE_SHIFT);
-
- ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);
- ptelast = ptefirst + numpages;
-
- pte = ptefirst;
- flushtlb = 0;
-
- /* tlb needs to be flushed when the first and last pte are not at
- * superpte boundaries */
- if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
- ((ptelast + 1) & (GSL_PT_SUPER_PTE-1)) != 0)
+ /* Flush the TLB if the first PTE isn't at the superpte boundary */
+ if (pte & (GSL_PT_SUPER_PTE - 1))
flushtlb = 1;
- for (pte = ptefirst; pte < ptelast; pte++, offset += PAGE_SIZE) {
-#ifdef VERBOSE_DEBUG
- /* check if PTE exists */
- uint32_t val = kgsl_pt_map_get(gpummu_pt, pte);
- if (val != 0 && val != GSL_PT_PAGE_DIRTY) {
- KGSL_CORE_ERR("pt entry %x is already set with "
- "value %x for pagetable %p\n", pte, val, gpummu_pt);
- return -EINVAL;
- }
-#endif
- if ((pte & (GSL_PT_SUPER_PTE-1)) == 0)
- if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
- flushtlb = 1;
- /* mark pte as in use */
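+ /* Walk the sg list, writing a PTE for every page of every entry */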
+ for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
+ unsigned int paddr = sg_phys(s);
+ unsigned int j;
- physaddr = memdesc->ops->physaddr(memdesc, offset);
- if (!physaddr) {
- KGSL_CORE_ERR("Failed to convert %x address to "
- "physical", (unsigned int)memdesc->hostptr + offset);
- kgsl_gpummu_unmap(mmu_specific_pt, memdesc);
- return -EFAULT;
+ /* Each sg entry might be multiple pages long */
+ for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) {
+ if (SUPERPTE_IS_DIRTY(pte))
+ flushtlb = 1;
+ kgsl_pt_map_set(gpummu_pt, pte, j | protflags);
}
- kgsl_pt_map_set(gpummu_pt, pte, physaddr | protflags);
}
- /* Post all writes to the pagetable */
+ /* Flush the TLB if the last PTE isn't at the superpte boundary */
+ if ((pte + 1) & (GSL_PT_SUPER_PTE - 1))
+ flushtlb = 1;
+
wmb();
- /* Invalidate tlb only if current page table used by GPU is the
- * pagetable that we used to allocate */
if (flushtlb) {
/*set all devices as needing flushing*/
gpummu_pt->tlb_flags = UINT_MAX;
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index f9b9b4a..f43b96e 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -257,37 +257,33 @@
struct kgsl_memdesc *memdesc,
unsigned int protflags)
{
- int ret = 0;
- unsigned int physaddr;
+ int ret, i;
+ struct scatterlist *s;
unsigned int iommu_virt_addr;
- unsigned int offset = 0;
int map_order;
- struct iommu_domain *domain = (struct iommu_domain *)
- mmu_specific_pt;
+ struct iommu_domain *domain = mmu_specific_pt;
BUG_ON(NULL == domain);
map_order = get_order(SZ_4K);
- for (iommu_virt_addr = memdesc->gpuaddr;
- iommu_virt_addr < (memdesc->gpuaddr + memdesc->size);
- iommu_virt_addr += SZ_4K, offset += PAGE_SIZE) {
- physaddr = memdesc->ops->physaddr(memdesc, offset);
- if (!physaddr) {
- KGSL_CORE_ERR("Failed to convert %x address to "
- "physical\n", (unsigned int)memdesc->hostptr + offset);
- kgsl_iommu_unmap(mmu_specific_pt, memdesc);
- return -EFAULT;
- }
- ret = iommu_map(domain, iommu_virt_addr, physaddr,
+ iommu_virt_addr = memdesc->gpuaddr;
+
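+ /* Map each 4K page of every sg entry at the next GPU virtual address */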
+ for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
+ unsigned int paddr = sg_phys(s), j;
+ for (j = paddr; j < paddr + s->length; j += PAGE_SIZE) {
+ ret = iommu_map(domain, iommu_virt_addr, j,
map_order, MSM_IOMMU_ATTR_NONCACHED);
- if (ret) {
- KGSL_CORE_ERR("iommu_map(%p, %x, %x, %d, %d) "
- "failed with err: %d\n", domain,
- iommu_virt_addr, physaddr, map_order,
- MSM_IOMMU_ATTR_NONCACHED, ret);
- kgsl_iommu_unmap(mmu_specific_pt, memdesc);
- return ret;
+ if (ret) {
+ KGSL_CORE_ERR("iommu_map(%p, %x, %x, %d, %d) "
+ "failed with err: %d\n", domain,
+ iommu_virt_addr, j, map_order,
+ MSM_IOMMU_ATTR_NONCACHED, ret);
+ kgsl_iommu_unmap(mmu_specific_pt, memdesc);
+ return ret;
+ }
+
+ iommu_virt_addr += SZ_4K;
}
}
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 7eec9e5..1879666 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -525,37 +525,6 @@
*/
}
-unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
-{
- unsigned int physaddr = 0;
- pgd_t *pgd_ptr = NULL;
- pmd_t *pmd_ptr = NULL;
- pte_t *pte_ptr = NULL, pte;
-
- pgd_ptr = pgd_offset(current->mm, (unsigned long) virtaddr);
- if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
- KGSL_CORE_ERR("Invalid pgd entry\n");
- return 0;
- }
-
- pmd_ptr = pmd_offset(pgd_ptr, (unsigned long) virtaddr);
- if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
- KGSL_CORE_ERR("Invalid pmd entry\n");
- return 0;
- }
-
- pte_ptr = pte_offset_map(pmd_ptr, (unsigned long) virtaddr);
- if (!pte_ptr) {
- KGSL_CORE_ERR("pt_offset_map failed\n");
- return 0;
- }
- pte = *pte_ptr;
- physaddr = pte_pfn(pte);
- pte_unmap(pte_ptr);
- physaddr <<= PAGE_SHIFT;
- return physaddr;
-}
-
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc,
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 09070e4..8f75daa 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -207,28 +207,21 @@
break;
}
}
-#endif
-static unsigned long kgsl_vmalloc_physaddr(struct kgsl_memdesc *memdesc,
- unsigned int offset)
+static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
- unsigned int addr;
+ struct scatterlist *s;
+ int i;
- if (offset > memdesc->size)
- return 0;
-
- addr = vmalloc_to_pfn(memdesc->hostptr + offset);
- return addr << PAGE_SHIFT;
+ for_each_sg(sg, s, sglen, i) {
+ unsigned int paddr = sg_phys(s);
+ _outer_cache_range_op(op, paddr, s->length);
+ }
}
-#ifdef CONFIG_OUTER_CACHE
-static void kgsl_vmalloc_outer_cache(struct kgsl_memdesc *memdesc, int op)
+#else
+static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
- void *vaddr = memdesc->hostptr;
- for (; vaddr < (memdesc->hostptr + memdesc->size); vaddr += PAGE_SIZE) {
- unsigned long paddr = page_to_phys(vmalloc_to_page(vaddr));
- _outer_cache_range_op(op, paddr, PAGE_SIZE);
- }
}
#endif
@@ -306,88 +299,24 @@
memdesc->hostptr, memdesc->physaddr);
}
-static unsigned long kgsl_contiguous_physaddr(struct kgsl_memdesc *memdesc,
- unsigned int offset)
-{
- if (offset > memdesc->size)
- return 0;
-
- return memdesc->physaddr + offset;
-}
-
-#ifdef CONFIG_OUTER_CACHE
-static void kgsl_contiguous_outer_cache(struct kgsl_memdesc *memdesc, int op)
-{
- _outer_cache_range_op(op, memdesc->physaddr, memdesc->size);
-}
-#endif
-
-#ifdef CONFIG_OUTER_CACHE
-static void kgsl_userptr_outer_cache(struct kgsl_memdesc *memdesc, int op)
-{
- void *vaddr = memdesc->hostptr;
- for (; vaddr < (memdesc->hostptr + memdesc->size); vaddr += PAGE_SIZE) {
- unsigned long paddr = kgsl_virtaddr_to_physaddr(vaddr);
- if (paddr)
- _outer_cache_range_op(op, paddr, PAGE_SIZE);
- }
-}
-#endif
-
-static unsigned long kgsl_userptr_physaddr(struct kgsl_memdesc *memdesc,
- unsigned int offset)
-{
- return kgsl_virtaddr_to_physaddr(memdesc->hostptr + offset);
-}
-
/* Global - also used by kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_vmalloc_ops = {
- .physaddr = kgsl_vmalloc_physaddr,
.free = kgsl_vmalloc_free,
.vmflags = kgsl_vmalloc_vmflags,
.vmfault = kgsl_vmalloc_vmfault,
-#ifdef CONFIG_OUTER_CACHE
- .outer_cache = kgsl_vmalloc_outer_cache,
-#endif
};
EXPORT_SYMBOL(kgsl_vmalloc_ops);
static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
- .physaddr = kgsl_contiguous_physaddr,
.free = kgsl_ebimem_free,
.vmflags = kgsl_contiguous_vmflags,
.vmfault = kgsl_contiguous_vmfault,
-#ifdef CONFIG_OUTER_CACHE
- .outer_cache = kgsl_contiguous_outer_cache,
-#endif
};
static struct kgsl_memdesc_ops kgsl_coherent_ops = {
- .physaddr = kgsl_contiguous_physaddr,
.free = kgsl_coherent_free,
-#ifdef CONFIG_OUTER_CACHE
- .outer_cache = kgsl_contiguous_outer_cache,
-#endif
};
-/* Global - also used by kgsl.c and kgsl_drm.c */
-struct kgsl_memdesc_ops kgsl_contiguous_ops = {
- .physaddr = kgsl_contiguous_physaddr,
-#ifdef CONFIG_OUTER_CACHE
- .outer_cache = kgsl_contiguous_outer_cache
-#endif
-};
-EXPORT_SYMBOL(kgsl_contiguous_ops);
-
-/* Global - also used by kgsl.c */
-struct kgsl_memdesc_ops kgsl_userptr_ops = {
- .physaddr = kgsl_userptr_physaddr,
-#ifdef CONFIG_OUTER_CACHE
- .outer_cache = kgsl_userptr_outer_cache,
-#endif
-};
-EXPORT_SYMBOL(kgsl_userptr_ops);
-
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
{
void *addr = memdesc->hostptr;
@@ -405,8 +334,7 @@
break;
}
- if (memdesc->ops->outer_cache)
- memdesc->ops->outer_cache(memdesc, op);
+ outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, op);
}
EXPORT_SYMBOL(kgsl_cache_range_op);
@@ -415,7 +343,9 @@
struct kgsl_pagetable *pagetable,
void *ptr, size_t size, unsigned int protflags)
{
- int result;
+ int order, ret = 0;
+ int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
+ int i;
memdesc->size = size;
memdesc->pagetable = pagetable;
@@ -423,25 +353,44 @@
memdesc->ops = &kgsl_vmalloc_ops;
memdesc->hostptr = (void *) ptr;
- kgsl_cache_range_op(memdesc, KGSL_CACHE_OP_INV);
-
- result = kgsl_mmu_map(pagetable, memdesc, protflags);
-
- if (result) {
- kgsl_sharedmem_free(memdesc);
- } else {
- int order;
-
- KGSL_STATS_ADD(size, kgsl_driver.stats.vmalloc,
- kgsl_driver.stats.vmalloc_max);
-
- order = get_order(size);
-
- if (order < 16)
- kgsl_driver.stats.histogram[order]++;
+ memdesc->sg = kmalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL);
+ if (memdesc->sg == NULL) {
+ ret = -ENOMEM;
+ goto done;
}
- return result;
+ memdesc->sglen = sglen;
+ sg_init_table(memdesc->sg, sglen);
+
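+ /* Build one sg entry for each page of the vmalloc region */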
+ for (i = 0; i < memdesc->sglen; i++, ptr += PAGE_SIZE) {
+ struct page *page = vmalloc_to_page(ptr);
+ if (!page) {
+ ret = -EINVAL;
+ goto done;
+ }
+ sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
+ }
+
+ kgsl_cache_range_op(memdesc, KGSL_CACHE_OP_INV);
+
+ ret = kgsl_mmu_map(pagetable, memdesc, protflags);
+
+ if (ret)
+ goto done;
+
+ KGSL_STATS_ADD(size, kgsl_driver.stats.vmalloc,
+ kgsl_driver.stats.vmalloc_max);
+
+ order = get_order(size);
+
+ if (order < 16)
+ kgsl_driver.stats.histogram[order]++;
+
+done:
+ if (ret)
+ kgsl_sharedmem_free(memdesc);
+
+ return ret;
}
int
@@ -494,24 +443,35 @@
int
kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
{
+ int result = 0;
+
size = ALIGN(size, PAGE_SIZE);
+ memdesc->size = size;
+ memdesc->ops = &kgsl_coherent_ops;
+
memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
GFP_KERNEL);
if (memdesc->hostptr == NULL) {
KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
- return -ENOMEM;
+ result = -ENOMEM;
+ goto err;
}
- memdesc->size = size;
- memdesc->ops = &kgsl_coherent_ops;
+ result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
+ if (result)
+ goto err;
/* Record statistics */
KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
kgsl_driver.stats.coherent_max);
- return 0;
+err:
+ if (result)
+ kgsl_sharedmem_free(memdesc);
+
+ return result;
}
EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);
@@ -523,9 +483,11 @@
if (memdesc->gpuaddr)
kgsl_mmu_unmap(memdesc->pagetable, memdesc);
- if (memdesc->ops->free)
+ if (memdesc->ops && memdesc->ops->free)
memdesc->ops->free(memdesc);
+ kfree(memdesc->sg);
+
memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);
@@ -534,8 +496,11 @@
_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size)
{
- int result;
+ int result = 0;
+ memdesc->size = size;
+ memdesc->pagetable = pagetable;
+ memdesc->ops = &kgsl_ebimem_ops;
memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);
if (memdesc->physaddr == 0) {
@@ -544,19 +509,24 @@
return -ENOMEM;
}
- memdesc->size = size;
- memdesc->pagetable = pagetable;
- memdesc->ops = &kgsl_ebimem_ops;
+ result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
+
+ if (result)
+ goto err;
result = kgsl_mmu_map(pagetable, memdesc,
GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
if (result)
- kgsl_sharedmem_free(memdesc);
+ goto err;
KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
kgsl_driver.stats.coherent_max);
+err:
+ if (result)
+ kgsl_sharedmem_free(memdesc);
+
return result;
}
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 9e57e78..a9abcf9 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -13,6 +13,7 @@
#ifndef __KGSL_SHAREDMEM_H
#define __KGSL_SHAREDMEM_H
+#include <linux/slab.h>
#include <linux/dma-mapping.h>
struct kgsl_device;
@@ -26,8 +27,6 @@
#define KGSL_MEMFLAGS_CACHED 0x00000001
struct kgsl_memdesc_ops {
- unsigned long (*physaddr)(struct kgsl_memdesc *, unsigned int);
- void (*outer_cache)(struct kgsl_memdesc *, int);
int (*vmflags)(struct kgsl_memdesc *);
int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
struct vm_fault *);
@@ -35,8 +34,6 @@
};
extern struct kgsl_memdesc_ops kgsl_vmalloc_ops;
-extern struct kgsl_memdesc_ops kgsl_contiguous_ops;
-extern struct kgsl_memdesc_ops kgsl_userptr_ops;
int kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size);
@@ -78,6 +75,22 @@
void kgsl_sharedmem_uninit_sysfs(void);
static inline int
+memdesc_sg_phys(struct kgsl_memdesc *memdesc,
+ unsigned int physaddr, unsigned int size)
+{
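+ /* Physically contiguous memory is covered by a single sg entry */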
+ struct page *page = phys_to_page(physaddr);
+
+ memdesc->sg = kmalloc(sizeof(struct scatterlist) * 1, GFP_KERNEL);
+ if (memdesc->sg == NULL)
+ return -ENOMEM;
+
+ memdesc->sglen = 1;
+ sg_init_table(memdesc->sg, 1);
+ sg_set_page(&memdesc->sg[0], page, size, 0);
+ return 0;
+}
+
+static inline int
kgsl_allocate(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size)
{