powerpc: Ensure random space between stack and mmaps
get_random_int() returns the same value within a 1 jiffy interval. This means
that the mmap and stack regions will almost always end up the same distance
apart, making a relative-offset-based attack possible.
To fix this, shift the randomness we use for the mmap region by 1 bit.
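
As an illustration (not part of the patch), the userspace sketch below compares
the old and new mmap_rnd() formulas against the stack randomisation when both
see the same get_random_int() value r, as happens within one jiffy. The
PAGE_SHIFT of 12, the STACK_RND_MASK value, the simplified stack_rnd() helper
and the sample values of r are assumptions made for the demo.

#include <stdio.h>

#define PAGE_SHIFT      12              /* assume 4K pages for the demo */
#define STACK_RND_MASK  0x3ffff         /* assumed 64-bit mask for 4K pages */

/* Simplified stand-in for the stack randomisation (randomize_stack_top()). */
static unsigned long stack_rnd(unsigned int r)
{
        return ((unsigned long)(r & STACK_RND_MASK)) << PAGE_SHIFT;
}

/* mmap_rnd() before the patch: same low bits as the stack randomisation. */
static unsigned long mmap_rnd_old(unsigned int r)
{
        return ((unsigned long)(r % (1 << (30 - PAGE_SHIFT)))) << PAGE_SHIFT;
}

/* mmap_rnd() after the patch: one bit less range, shifted up by one bit. */
static unsigned long mmap_rnd_new(unsigned int r)
{
        return (((unsigned long)(r % (1 << (29 - PAGE_SHIFT)))) << PAGE_SHIFT) * 2;
}

int main(void)
{
        unsigned int samples[] = { 0x00001234, 0x0002beef, 0x0003cafe, 0xdeadbeef };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                unsigned int r = samples[i];

                /*
                 * Old: the mmap offset tracks the stack offset exactly, so
                 * the stack-to-mmap distance never changes.  New: the mmap
                 * offset uses different bit positions and the distance
                 * varies with r.
                 */
                printf("r=%#010x stack=%#lx mmap_old=%#lx mmap_new=%#lx\n",
                       r, stack_rnd(r), mmap_rnd_old(r), mmap_rnd_new(r));
        }
        return 0;
}

Note that halving the range before doubling keeps the overall mmap
randomisation span the same (8MB for 32-bit, 1GB for 64-bit); only the
granularity changes from one page to two.
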
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index f7376db..75dc7fa 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -46,6 +46,14 @@
return sysctl_legacy_va_layout;
}
+/*
+ * Since get_random_int() returns the same value within a 1 jiffy window,
+ * we will almost always get the same randomisation for the stack and mmap
+ * region.  This means the relative distance between the stack and mmap
+ * regions stays the same, making a relative offset based attack possible.
+ *
+ * To avoid this we shift the mmap randomness up by 1 bit.
+ */
static unsigned long mmap_rnd(void)
{
unsigned long rnd = 0;
@@ -53,11 +61,11 @@
if (current->flags & PF_RANDOMIZE) {
/* 8MB for 32bit, 1GB for 64bit */
if (is_32bit_task())
- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+ rnd = (long)(get_random_int() % (1<<(22-PAGE_SHIFT)));
else
- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+ rnd = (long)(get_random_int() % (1<<(29-PAGE_SHIFT)));
}
- return rnd << PAGE_SHIFT;
+ return (rnd << PAGE_SHIFT) * 2;
}
static inline unsigned long mmap_base(void)