msm: kgsl: don't fault in cached allocations
Make sure cache operations don't hit page faults by
backing the entire vma in mmap() instead of faulting
in pages as they are touched. Otherwise, a later cache
operation on the mapping could trigger an unhandled
page fault and lead to a kernel panic.
Change-Id: Ia73c8aaed2708c5b9ef46ed50fb0f5cf1ad2450c
Signed-off-by: Jeremy Gebben <jgebben@codeaurora.org>
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 9f2df27..f286d06 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2699,6 +2699,29 @@
}
vma->vm_ops = &kgsl_gpumem_vm_ops;
+
+ if (cache == KGSL_CACHEMODE_WRITEBACK
+ || cache == KGSL_CACHEMODE_WRITETHROUGH) {
+ struct scatterlist *s;
+ int i;
+ int sglen = entry->memdesc.sglen;
+ unsigned long addr = vma->vm_start;
+
+ /* don't map in the guard page, it should always fault */
+ if (kgsl_memdesc_has_guard_page(&entry->memdesc))
+ sglen--;
+
+ for_each_sg(entry->memdesc.sg, s, sglen, i) {
+ int j;
+ for (j = 0; j < (sg_dma_len(s) >> PAGE_SHIFT); j++) {
+ struct page *page = sg_page(s);
+ page = nth_page(page, j);
+ vm_insert_page(vma, addr, page);
+ addr += PAGE_SIZE;
+ }
+ }
+ }
+
vma->vm_file = file;
entry->memdesc.useraddr = vma->vm_start;
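
For readers not familiar with this part of the driver, the following is a
minimal, self-contained sketch of the same prefaulting pattern, written as a
standalone helper rather than inline in the mmap handler as the patch above
does. The name prefault_sg_into_vma() is hypothetical and the code assumes a
buffer described by a kernel scatterlist; unlike the patch, it also checks the
return value of vm_insert_page().

/*
 * Hypothetical helper (not the driver's actual code): prefault every page
 * of a scatterlist-backed buffer into a vma so that later accesses to the
 * user mapping never take a page fault.
 */
#include <linux/mm.h>
#include <linux/scatterlist.h>

static int prefault_sg_into_vma(struct vm_area_struct *vma,
				struct scatterlist *sgl, int sglen)
{
	struct scatterlist *s;
	unsigned long addr = vma->vm_start;
	int i;

	for_each_sg(sgl, s, sglen, i) {
		/* a single scatterlist entry may span several pages */
		int npages = sg_dma_len(s) >> PAGE_SHIFT;
		int j;

		for (j = 0; j < npages; j++) {
			int ret = vm_insert_page(vma, addr,
						 nth_page(sg_page(s), j));

			if (ret)
				return ret;
			addr += PAGE_SIZE;
		}
	}
	return 0;
}

Note that the patch itself deliberately leaves the guard page out of the loop
(sglen is decremented when a guard page is present) so that any access to it
still faults, and it only prefaults WRITEBACK and WRITETHROUGH mappings,
presumably because uncached and writecombined mappings need no CPU cache
maintenance in the first place.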