xtensa: avoid mmap cache aliasing

Provide an arch_get_unmapped_area function that aligns shared memory
mapping addresses to the larger of the page size and the cache way
size. This guarantees that corresponding virtual addresses of shared
mappings fall into the same cache sets.
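
For illustration, a minimal standalone sketch of the same colour
alignment arithmetic (the PAGE_SHIFT and SHMLBA values below are
assumptions for the example; on xtensa SHMLBA comes from the cache
configuration):

	#include <stdio.h>

	#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
	#define SHMLBA		(4 << PAGE_SHIFT)	/* assumed 16 KiB way size */

	/* Mirror of the macro added by this patch. */
	#define COLOUR_ALIGN(addr, pgoff) \
		((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
		 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

	int main(void)
	{
		/* 0x10001000 rounds up to 0x10004000 and pgoff 3 adds
		 * colour 0x3000: prints 0x10007000, which is congruent
		 * to the file offset modulo SHMLBA. */
		printf("%#lx\n", COLOUR_ALIGN(0x10001000UL, 3UL));
		return 0;
	}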

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 54fa842..5d3f7a1 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -36,6 +36,12 @@
 #include <uapi/asm/unistd.h>
 };
 
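+/* Round addr up to SHMLBA, then add the colour of the file offset so
+ * that (addr - (pgoff << PAGE_SHIFT)) is a multiple of SHMLBA. */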
+#define COLOUR_ALIGN(addr, pgoff) \
+	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
 asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
 {
 	unsigned long ret;
@@ -52,3 +58,40 @@
 {
 	return sys_fadvise64_64(fd, offset, len, advice);
 }
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct vm_area_struct *vmm;
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) &&
+				((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+	if (!addr)
+		addr = TASK_UNMAPPED_BASE;
+
+	if (flags & MAP_SHARED)
+		addr = COLOUR_ALIGN(addr, pgoff);
+	else
+		addr = PAGE_ALIGN(addr);
+
+	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
+		/* At this point:  (!vmm || addr < vmm->vm_end). */
+		if (TASK_SIZE - len < addr)
+			return -ENOMEM;
+		if (!vmm || addr + len <= vmm->vm_start)
+			return addr;
+		addr = vmm->vm_end;
+		if (flags & MAP_SHARED)
+			addr = COLOUR_ALIGN(addr, pgoff);
+	}
+}
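
For reference, a minimal userspace sketch (not part of this patch) of
the constraint enforced by the MAP_FIXED branch above: a fixed shared
mapping whose address and file offset disagree modulo SHMLBA is
rejected with EINVAL. The file path is a placeholder, and
SHMLBA > PAGE_SIZE is assumed, as on aliasing xtensa cache
configurations:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/mman.h>
	#include <sys/shm.h>	/* SHMLBA */

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		int fd = open("/tmp/testfile", O_RDWR);	/* placeholder file */

		if (fd < 0)
			return 1;

		/* SHMLBA-aligned hint, but the offset is one page into
		 * the file, so addr - (pgoff << PAGE_SHIFT) is not a
		 * multiple of SHMLBA. */
		void *hint = (void *)(16UL * SHMLBA);
		void *p = mmap(hint, page, PROT_READ | PROT_WRITE,
			       MAP_SHARED | MAP_FIXED, fd, page);

		if (p == MAP_FAILED)
			perror("mmap");	/* expect EINVAL here */
		return 0;
	}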