msm: kgsl: Create scatter-gather lists for memory objects

Create and use scatter-gather lists for memory objects. This avoids
having to dynamically look up the physical addresses of pages for
MMU mapping and cache operations.
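
For reference, a minimal sketch of how a memdesc's scatterlist could
be built from discrete pages and later walked for physical addresses.
The helper names and the page array are illustrative only and are not
part of this patch or the driver's actual allocation path:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mm.h>

/* Illustrative only: one single-page sg entry per backing page */
static struct scatterlist *example_build_sg(struct page **pages, int npages)
{
	struct scatterlist *sg;
	int i;

	sg = kcalloc(npages, sizeof(*sg), GFP_KERNEL);
	if (sg == NULL)
		return NULL;

	sg_init_table(sg, npages);

	for (i = 0; i < npages; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	return sg;
}

/* Walking the list needs no hostptr -> physaddr conversion */
static void example_walk_sg(struct scatterlist *sgl, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sgl, s, nents, i) {
		phys_addr_t paddr = sg_phys(s);

		pr_debug("entry %d: phys %pa len %u\n", i, &paddr, s->length);
	}
}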

Change-Id: Ic0dedbad9b973ecce4ae773b6bd682ba01010e5b
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 383b910..fe5677e 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -659,68 +659,45 @@
 	return 0;
 }
 
+#define SUPERPTE_IS_DIRTY(_p) \
+(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \
+GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))
+
 static int
 kgsl_gpummu_map(void *mmu_specific_pt,
 		struct kgsl_memdesc *memdesc,
 		unsigned int protflags)
 {
-	int numpages;
-	unsigned int pte, ptefirst, ptelast, physaddr;
-	int flushtlb;
-	unsigned int offset = 0;
+	unsigned int pte;
 	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
+	struct scatterlist *s;
+	int flushtlb = 0;
+	int i;
 
-	if (!protflags ||
-		protflags & ~(GSL_PT_PAGE_RV | GSL_PT_PAGE_WV)) {
-		KGSL_CORE_ERR("Invalid protflags for "
-			"kgsl_mmu_specific_map: %x", protflags);
-		return -EINVAL;
-	}
+	pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);
 
-	numpages = (memdesc->size >> PAGE_SHIFT);
-
-	ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);
-	ptelast = ptefirst + numpages;
-
-	pte = ptefirst;
-	flushtlb = 0;
-
-	/* tlb needs to be flushed when the first and last pte are not at
-	* superpte boundaries */
-	if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
-		((ptelast + 1) & (GSL_PT_SUPER_PTE-1)) != 0)
+	/* Flush the TLB if the first PTE isn't at the superpte boundary */
+	if (pte & (GSL_PT_SUPER_PTE - 1))
 		flushtlb = 1;
 
-	for (pte = ptefirst; pte < ptelast; pte++, offset += PAGE_SIZE) {
-#ifdef VERBOSE_DEBUG
-		/* check if PTE exists */
-		uint32_t val = kgsl_pt_map_get(gpummu_pt, pte);
-		if (val != 0 && val != GSL_PT_PAGE_DIRTY) {
-			KGSL_CORE_ERR("pt entry %x is already set with "
-			"value %x for pagetable %p\n", pte, val, gpummu_pt);
-			return -EINVAL;
-		}
-#endif
-		if ((pte & (GSL_PT_SUPER_PTE-1)) == 0)
-			if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
-				flushtlb = 1;
-		/* mark pte as in use */
+	for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
+		unsigned int paddr = sg_phys(s);
+		unsigned int j;
 
-		physaddr = memdesc->ops->physaddr(memdesc, offset);
-		if (!physaddr) {
-			KGSL_CORE_ERR("Failed to convert %x address to "
-			"physical", (unsigned int)memdesc->hostptr + offset);
-			kgsl_gpummu_unmap(mmu_specific_pt, memdesc);
-			return -EFAULT;
+		/* Each sg entry might be multiple pages long */
+		for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) {
+			if (SUPERPTE_IS_DIRTY(pte))
+				flushtlb = 1;
+			kgsl_pt_map_set(gpummu_pt, pte, j | protflags);
 		}
-		kgsl_pt_map_set(gpummu_pt, pte, physaddr | protflags);
 	}
 
-	/* Post all writes to the pagetable */
+	/* Flush the TLB if the last PTE isn't at the superpte boundary */
+	if ((pte + 1) & (GSL_PT_SUPER_PTE - 1))
+		flushtlb = 1;
+
 	wmb();
 
-	/* Invalidate tlb only if current page table used by GPU is the
-	 * pagetable that we used to allocate */
 	if (flushtlb) {
 		/*set all devices as needing flushing*/
 		gpummu_pt->tlb_flags = UINT_MAX;