/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches: no aliasing, page alignment is enough */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

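/*
 * True if this process should use the legacy bottom-up mmap layout:
 * forced by ADDR_COMPAT_LAYOUT or an unlimited stack rlimit, otherwise
 * governed by the legacy_va_layout sysctl.
 */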
static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

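/*
 * Base address for top-down allocation: just below the stack, leaving
 * a gap of at least MIN_GAP and at most MAX_GAP, lowered further by
 * the ASLR offset 'rnd'.
 */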
static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

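/*
 * Round addr down to a "colour aligned" address, i.e. one congruent to
 * (pgoff << PAGE_SHIFT) modulo (shm_align_mask + 1), so that a shared
 * mapping cannot alias with another mapping of the same file in a
 * virtually indexed cache.
 */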
static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	if (base + off <= addr)
		return base + off;

	/* base + off lies above addr; the next colour match is one window down */
	return base + off - (shm_align_mask + 1);
}

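/*
 * Round addr up to the next colour aligned address. For example, with
 * shm_align_mask == 0x7fff, addr == 0x10400 and a file offset colour
 * of 0x3000 this yields 0x1b000.
 */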
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};

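/*
 * Common worker for both allocation directions: validate the request,
 * honour MAP_FIXED and address hints, then search upwards from
 * mm->mmap_base (UP) or downwards below it (DOWN) for a suitably
 * coloured free range.
 */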
static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (dir == UP) {
		addr = mm->mmap_base;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
			/* At this point:  (!vma || addr < vma->vm_end). */
			if (TASK_SIZE - len < addr)
				return -ENOMEM;
			if (!vma || addr + len <= vma->vm_start)
				return addr;
			addr = vma->vm_end;
			if (do_color_align)
				addr = COLOUR_ALIGN(addr, pgoff);
		}
	} else {
		/* check if free_area_cache is useful for us */
		if (len <= mm->cached_hole_size) {
			mm->cached_hole_size = 0;
			mm->free_area_cache = mm->mmap_base;
		}

		/*
		 * either no address requested, or the mapping can't fit into
		 * the requested address hole
		 */
		addr = mm->free_area_cache;
		if (do_color_align) {
			unsigned long base =
				COLOUR_ALIGN_DOWN(addr - len, pgoff);
			addr = base + len;
		}

		/* make sure it can fit in the remaining address space */
		if (likely(addr > len)) {
			vma = find_vma(mm, addr - len);
			if (!vma || addr <= vma->vm_start) {
				/* cache the address as a hint for next time */
				return mm->free_area_cache = addr - len;
			}
		}

		if (unlikely(mm->mmap_base < len))
			goto bottomup;

		addr = mm->mmap_base - len;
		if (do_color_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);

		do {
			/*
			 * Lookup failure means no vma is above this address,
			 * else if new region fits below vma->vm_start,
			 * return with success:
			 */
			vma = find_vma(mm, addr);
			if (likely(!vma || addr + len <= vma->vm_start)) {
				/* cache the address as a hint for next time */
				return mm->free_area_cache = addr;
			}

			/* remember the largest hole we saw so far */
			if (addr + mm->cached_hole_size < vma->vm_start)
				mm->cached_hole_size = vma->vm_start - addr;

			/* try just below the current vma->vm_start */
			addr = vma->vm_start - len;
			if (do_color_align)
				addr = COLOUR_ALIGN_DOWN(addr, pgoff);
		} while (likely(len < vma->vm_start));

bottomup:
		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
		mm->cached_hole_size = ~0UL;
		mm->free_area_cache = TASK_UNMAPPED_BASE;
		addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
		/* Restore the topdown base: */
		mm->free_area_cache = mm->mmap_base;
		mm->cached_hole_size = ~0UL;

		return addr;
	}
}

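/* Bottom-up allocator, used for the legacy mmap layout. */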
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

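/*
 * Select the mmap layout for a new process: bottom-up for the legacy
 * layout, top-down otherwise, with the base shifted by up to 16MB
 * (32-bit) or 256MB (64-bit) of page-aligned randomness when
 * PF_RANDOMIZE is set.
 */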
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		random_factor = get_random_int();
		random_factor = random_factor << PAGE_SHIFT;
		if (TASK_IS_32BIT_ADDR)
			random_factor &= 0xfffffful;
		else
			random_factor &= 0xffffffful;
	}

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

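/* Random, page-aligned offset to apply to the initial brk. */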
static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = get_random_int();

	rnd = rnd << PAGE_SHIFT;
	/* 8MB for 32bit, 256MB for 64bit */
	if (TASK_IS_32BIT_ADDR)
		rnd = rnd & 0x7ffffful;
	else
		rnd = rnd & 0xffffffful;

	return rnd;
}

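/*
 * Randomize the start of the heap, falling back to the unrandomized
 * brk if the randomized value would wrap around the address space.
 */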
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}