gpu: ion: Map a range into the IOMMU

Instead of mapping one 4K page at a time into the IOMMU, build a
scatterlist for the whole range and map it in a single call. This is
more efficient.
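
For reference, a minimal sketch of the batched mapping described above.
It is illustrative only: the helper name map_extra_sketch is made up for
this example, while iommu_map_range(), the iommu_dummy page and the
scatterlist helpers are the ones actually used in the change below.

/*
 * Illustrative sketch: one scatterlist entry per 4K page, all pointing
 * at the same dummy page, handed to the IOMMU driver in a single call
 * instead of one iommu_map() call per page.
 */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>

static int map_extra_sketch(struct iommu_domain *domain, unsigned long iova,
			    unsigned long size, struct page *dummy_page,
			    int cached)
{
	unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
	struct scatterlist *sglist;
	unsigned int i;
	int ret;

	sglist = vmalloc(sizeof(*sglist) * nrpages);
	if (!sglist)
		return -ENOMEM;

	sg_init_table(sglist, nrpages);
	for (i = 0; i < nrpages; i++)
		sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);

	/* msm-specific extension of the IOMMU API, as used below. */
	ret = iommu_map_range(domain, iova, sglist, size, cached);

	vfree(sglist);
	return ret;
}

vmalloc() is chosen for the list itself since the number of scatterlist
entries grows with the size of the range and can exceed what kmalloc()
reliably provides.
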
Change-Id: I8e83066869dd6f7a479bad22a66e4c70cc5973b5
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index a7663b6..3c6dc64 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -13,6 +13,7 @@
 #include <mach/msm_subsystem_map.h>
 #include <linux/memory_alloc.h>
 #include <linux/iommu.h>
+#include <linux/vmalloc.h>
 #include <asm/sizes.h>
 #include <asm/page.h>
 #include <linux/init.h>
@@ -54,33 +55,32 @@
 				unsigned long size,
 				int cached)
 {
-	int i, ret;
-	unsigned long temp_iova;
+	int i, ret = 0;
+	struct scatterlist *sglist;
+	unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
+	struct page *dummy_page = phys_to_page(
+				PFN_ALIGN(virt_to_phys(iommu_dummy)));
 
-	for (i = size, temp_iova = start_iova; i > 0; i -= SZ_4K,
-						temp_iova += SZ_4K) {
-		ret = iommu_map(domain, temp_iova,
-				PFN_ALIGN(virt_to_phys(iommu_dummy)),
-				get_order(SZ_4K),
-				0);
-
-		if (ret) {
-			pr_err("%s: could not map %lx to dummy page in domain"
-				" %p\n",
-				__func__, temp_iova, domain);
-			goto out;
-		}
+	sglist = vmalloc(sizeof(*sglist) * nrpages);
+	if (!sglist) {
+		ret = -ENOMEM;
+		goto err1;
 	}
-	return 0;
+	sg_init_table(sglist, nrpages);
 
-out:
+	for (i = 0; i < nrpages; i++)
+		sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);
 
-	for ( ; i < size; i += SZ_4K, temp_iova -= SZ_4K)
-		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+	ret = iommu_map_range(domain, start_iova, sglist, size, cached);
+	if (ret) {
+		pr_err("%s: could not map extra %lx in domain %p\n",
+			__func__, start_iova, domain);
+	}
 
-	return -EINVAL;
-
+	vfree(sglist);
+err1:
+	return ret;
 }