/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif
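
/*
 * Illustrative sketch only, not part of the original file: with the
 * debugging variants above enabled, a function instrumented with
 * kenter()/kleave() brackets its trace output.  This hypothetical
 * helper would log "==> foo(42)" on entry and "<== foo() = 0" on
 * exit; it is kept under #if 0 so it does not affect the build.
 */
#if 0
static int foo(int x)
{
	kenter("%d", x);
	/* ... do the real work here ... */
	kleave(" = %d", 0);
	return 0;
}
#endif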

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(num_physpages);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return a size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}
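
/*
 * Illustrative sketch only, not part of the original file: kobjsize()
 * reports the memory actually backing a pointer, which may be larger
 * than the caller requested because of allocator rounding.  For a
 * kmalloc'd object it defers to ksize(); kept under #if 0.
 */
#if 0
static void kobjsize_demo(void)
{
	char *p = kmalloc(100, GFP_KERNEL);

	if (p) {
		/* typically prints the slab bucket size (e.g. 128), not 100 */
		printk(KERN_DEBUG "kobjsize(p) = %u\n", kobjsize(p));
		kfree(p);
	}
}
#endif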

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int nr_pages, unsigned int foll_flags,
		     struct page **pages, struct vm_area_struct **vmas,
		     int *retry)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of
 *   a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
	unsigned long start, int nr_pages, int write, int force,
	struct page **pages, struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
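
/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * file): a driver might pin a single page of the current task's
 * address space like this.  On NOMMU the "pin" is just the refcount
 * bump taken inside __get_user_pages(); the caller would drop it with
 * page_cache_release() when done.  Kept under #if 0.
 */
#if 0
static int pin_one_page(unsigned long uaddr, struct page **page)
{
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, 1,
			     1 /* write */, 0 /* force */, page, NULL);
	up_read(&current->mm->mmap_sem);

	return (ret == 1) ? 0 : -EFAULT;
}
#endif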

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);
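
/*
 * Illustrative sketch only, not part of the original file: because
 * NOMMU addresses are physical, follow_pfn() reduces to a shift.  A
 * hypothetical caller looks like this; kept under #if 0.
 */
#if 0
static int report_pfn(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long pfn;
	int ret;

	/* fails with -EINVAL unless the VMA is VM_IO or VM_PFNMAP */
	ret = follow_pfn(vma, addr, &pfn);
	if (ret == 0)
		printk(KERN_DEBUG "%lx -> pfn %lx\n", addr, pfn);
	return ret;
}
#endif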

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}
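
/*
 * Illustrative note, not part of the original file: the clamp in
 * vwrite() above guards against addr + count wrapping past the top of
 * the address space.  Worked example on a hypothetical 32-bit box:
 * with addr = 0xfffff000 and count = 0x2000, addr + count wraps to
 * 0x1000, which is < count, so count is clamped to -addr = 0x1000 --
 * exactly the number of bytes remaining before the wrap.
 */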

/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

/*
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  In this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}
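
/*
 * Illustrative note, not part of the original file: on NOMMU the heap
 * is confined to the fixed window [mm->start_brk, mm->context.end_brk],
 * so the raw syscall can only slide mm->brk within it.  A hypothetical
 * user process would see, e.g.:
 *
 *	cur = syscall(__NR_brk, 0);		returns current mm->brk
 *						(0 < start_brk, no change)
 *	new = syscall(__NR_brk, cur + 4096);	grows the heap if it still
 *						fits below end_brk, else
 *						returns the old break
 */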

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(unlikely(last->vm_end <= last->vm_start));
	BUG_ON(unlikely(last->vm_top < last->vm_end));

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(unlikely(region->vm_end <= region->vm_start));
		BUG_ON(unlikely(region->vm_top < region->vm_end));
		BUG_ON(unlikely(region->vm_start < last->vm_top));

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		kdebug("- free %lx", from);
		atomic_long_dec(&mmap_pages_allocated);
		if (page_count(page) != 1)
			kdebug("free page %p: refcount not one: %d",
			       page, page_count(page));
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	kenter("%p{%d}", region, region->vm_usage);

	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY) {
			kdebug("free series");
			free_page_series(region->vm_start, region->vm_top);
		}
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * update protection on a vma
 */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;
	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	kenter(",%p", vma);

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;

	kenter("%p", vma);

	protect_vma(vma, 0);

	mm->map_count--;
	if (mm->mmap_cache == vma)
		mm->mmap_cache = NULL;

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	kenter("%p", vma);
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file) {
		fput(vma->vm_file);
		if (vma->vm_flags & VM_EXECUTABLE)
			removed_exe_file_vma(mm);
	}
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = mm->mmap_cache;
	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);
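
/*
 * Illustrative sketch only (hypothetical caller, not part of this
 * file): unlike the MMU version of find_vma(), the implementation
 * above only returns a VMA that actually contains addr, so a caller
 * can test containment directly.  Kept under #if 0.
 */
#if 0
static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	bool mapped;

	down_read(&mm->mmap_sem);
	mapped = (find_vma(mm, addr) != NULL);
	up_read(&mm->mmap_sem);

	return mapped;
}
#endif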

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = mm->mmap_cache;
	if (vma && vma->vm_start == addr && vma->vm_end == end)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}
 | 878 |  | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 879 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 880 |  * determine whether a mapping should be permitted and, if so, what sort of | 
 | 881 |  * mapping we're capable of supporting | 
 | 882 |  */ | 
 | 883 | static int validate_mmap_request(struct file *file, | 
 | 884 | 				 unsigned long addr, | 
 | 885 | 				 unsigned long len, | 
 | 886 | 				 unsigned long prot, | 
 | 887 | 				 unsigned long flags, | 
 | 888 | 				 unsigned long pgoff, | 
 | 889 | 				 unsigned long *_capabilities) | 
 | 890 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 891 | 	unsigned long capabilities, rlen; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 892 | 	unsigned long reqprot = prot; | 
 | 893 | 	int ret; | 
 | 894 |  | 
 | 895 | 	/* do the simple checks first */ | 
| David Howells | 06aab5a | 2009-09-24 12:33:48 +0100 | [diff] [blame] | 896 | 	if (flags & MAP_FIXED) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 897 | 		printk(KERN_DEBUG | 
 | 898 | 		       "%d: Can't do fixed-address/overlay mmap of RAM\n", | 
 | 899 | 		       current->pid); | 
 | 900 | 		return -EINVAL; | 
 | 901 | 	} | 
 | 902 |  | 
 | 903 | 	if ((flags & MAP_TYPE) != MAP_PRIVATE && | 
 | 904 | 	    (flags & MAP_TYPE) != MAP_SHARED) | 
 | 905 | 		return -EINVAL; | 
 | 906 |  | 
| Mike Frysinger | f81cff0 | 2006-12-06 12:02:59 +1000 | [diff] [blame] | 907 | 	if (!len) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 908 | 		return -EINVAL; | 
 | 909 |  | 
| Mike Frysinger | f81cff0 | 2006-12-06 12:02:59 +1000 | [diff] [blame] | 910 | 	/* Careful about overflows.. */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 911 | 	rlen = PAGE_ALIGN(len); | 
 | 912 | 	if (!rlen || rlen > TASK_SIZE) | 
| Mike Frysinger | f81cff0 | 2006-12-06 12:02:59 +1000 | [diff] [blame] | 913 | 		return -ENOMEM; | 
 | 914 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 915 | 	/* offset overflow? */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 916 | 	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff) | 
| Mike Frysinger | f81cff0 | 2006-12-06 12:02:59 +1000 | [diff] [blame] | 917 | 		return -EOVERFLOW; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 918 |  | 
 | 919 | 	if (file) { | 
 | 920 | 		/* validate file mapping requests */ | 
 | 921 | 		struct address_space *mapping; | 
 | 922 |  | 
 | 923 | 		/* files must support mmap */ | 
 | 924 | 		if (!file->f_op || !file->f_op->mmap) | 
 | 925 | 			return -ENODEV; | 
 | 926 |  | 
 | 927 | 		/* work out if what we've got could possibly be shared | 
 | 928 | 		 * - we support chardevs that provide their own "memory" | 
 | 929 | 		 * - we support files/blockdevs that are memory backed | 
 | 930 | 		 */ | 
 | 931 | 		mapping = file->f_mapping; | 
 | 932 | 		if (!mapping) | 
| Josef Sipek | e9536ae | 2006-12-08 02:37:21 -0800 | [diff] [blame] | 933 | 			mapping = file->f_path.dentry->d_inode->i_mapping; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 934 |  | 
 | 935 | 		capabilities = 0; | 
 | 936 | 		if (mapping && mapping->backing_dev_info) | 
 | 937 | 			capabilities = mapping->backing_dev_info->capabilities; | 
 | 938 |  | 
 | 939 | 		if (!capabilities) { | 
 | 940 | 			/* no explicit capabilities set, so assume some | 
 | 941 | 			 * defaults */ | 
| Josef Sipek | e9536ae | 2006-12-08 02:37:21 -0800 | [diff] [blame] | 942 | 			switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 943 | 			case S_IFREG: | 
 | 944 | 			case S_IFBLK: | 
 | 945 | 				capabilities = BDI_CAP_MAP_COPY; | 
 | 946 | 				break; | 
 | 947 |  | 
 | 948 | 			case S_IFCHR: | 
 | 949 | 				capabilities = | 
 | 950 | 					BDI_CAP_MAP_DIRECT | | 
 | 951 | 					BDI_CAP_READ_MAP | | 
 | 952 | 					BDI_CAP_WRITE_MAP; | 
 | 953 | 				break; | 
 | 954 |  | 
 | 955 | 			default: | 
 | 956 | 				return -EINVAL; | 
 | 957 | 			} | 
 | 958 | 		} | 
 | 959 |  | 
 | 960 | 		/* eliminate any capabilities that we can't support on this | 
 | 961 | 		 * device */ | 
 | 962 | 		if (!file->f_op->get_unmapped_area) | 
 | 963 | 			capabilities &= ~BDI_CAP_MAP_DIRECT; | 
 | 964 | 		if (!file->f_op->read) | 
 | 965 | 			capabilities &= ~BDI_CAP_MAP_COPY; | 
 | 966 |  | 
| Graff Yang | 28d7a6a | 2009-08-18 14:11:17 -0700 | [diff] [blame] | 967 | 		/* The file shall have been opened with read permission. */ | 
 | 968 | 		if (!(file->f_mode & FMODE_READ)) | 
 | 969 | 			return -EACCES; | 
 | 970 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 971 | 		if (flags & MAP_SHARED) { | 
 | 972 | 			/* do checks for writing, appending and locking */ | 
 | 973 | 			if ((prot & PROT_WRITE) && | 
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file->f_path.dentry->d_inode) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file->f_path.dentry->d_inode))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		if (capabilities & BDI_CAP_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				capabilities &= ~BDI_CAP_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					printk(KERN_WARNING
					       "MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			 (prot & PROT_EXEC) &&
			 !(capabilities & BDI_CAP_EXEC_MAP)
			 ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}
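
/*
 * For illustration, a sketch of how the checks above appear from userspace
 * (hypothetical file and sizes).  A shared writable mapping of an ordinary
 * file is refused when the backing device cannot map it directly (no
 * BDI_CAP_MAP_DIRECT):
 *
 *	int fd = open("data.bin", O_RDWR);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	// fails with -ENODEV here, whereas MAP_PRIVATE of the same file
 *	// would succeed by falling back to a kernel-made private copy
 */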

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
		if (flags & MAP_SHARED)
			vm_flags |= VM_SHARED;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && current->ptrace)
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}
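
/*
 * For example, assuming a regular file with no direct-mapping capability:
 *
 *	mmap(..., PROT_READ, MAP_PRIVATE, fd, 0)
 *	  -> VM_READ | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_MAYSHARE
 *	     (read-only, so identical mappings may share one private copy)
 *
 *	mmap(..., PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0)
 *	  -> as above plus VM_WRITE but without VM_MAYSHARE, since a
 *	     writable copy must remain private to the caller
 */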

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}
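
/*
 * The contract with ->mmap() here, sketched as a hypothetical driver stub
 * (foo_mmap and foo_can_map_directly are illustrative names): -ENOSYS means
 * "direct mapping is not supported at all", any other error means "tried
 * but failed":
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		if (!foo_can_map_directly(file, vma))
 *			return -ENOSYS;	// caller may fall back to a copy
 *		// ... point the VMA at the device memory ...
 *		return 0;
 *	}
 */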

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	struct page *pages;
	unsigned long total, point, n;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & BDI_CAP_MAP_DIRECT) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(len);
	kdebug("alloc order %d for %lx", order, len);

	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages)
		goto enomem;

	total = 1 << order;
	atomic_long_add(total, &mmap_pages_allocated);

	point = len >> PAGE_SHIFT;

	/* we allocated a power-of-2 sized page set, so we may want to trim off
	 * the excess */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
		while (total > point) {
			order = ilog2(total - point);
			n = 1 << order;
			kdebug("shave %lu/%lu @%lu", n, total - point, total);
			atomic_long_sub(n, &mmap_pages_allocated);
			total -= n;
			set_page_refcounted(pages + total);
			__free_pages(pages + total, order);
		}
	}

	for (point = 1; point < total; point++)
		set_page_refcounted(&pages[point]);

	base = page_address(pages);
	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + len;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);
	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end   = vma->vm_end = 0;
	region->vm_top   = 0;
	return ret;

enomem:
	printk(KERN_WARNING
	       "Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0);
	return -ENOMEM;
}
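
/*
 * A worked example of the allocation and trim above, assuming 4KB pages and
 * the default sysctl_nr_trim_pages of 1: for len = 20KB (5 pages),
 * get_order() yields 3, so 8 pages are allocated, then the excess is shaved
 * off in power-of-2 chunks:
 *
 *	total = 8, point = 5
 *	ilog2(8 - 5) = 1 -> free 2 pages at offset 6, total = 6
 *	ilog2(6 - 5) = 0 -> free 1 page  at offset 5, total = 5
 *
 * leaving exactly the 5 pages needed, with vm_top recording the top of what
 * was kept.
 */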

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags, result;
	int ret;

	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0) {
		kleave(" = %d [val]", ret);
		return ret;
	}

	/* we ignore the address hint */
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = file;
		get_file(file);
		vma->vm_file = file;
		get_file(file);
		if (vm_flags & VM_EXECUTABLE) {
			added_exe_file_vma(current->mm);
			vma->vm_mm = current->mm;
		}
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned, for in
	 *   these cases sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (pregion->vm_file->f_path.dentry->d_inode !=
			    file->f_path.dentry->d_inode)
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping matches between
			 * mappings */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY) {
				kdebug("share copy");
				vma->vm_flags |= VM_MAPPED_COPY;
			} else {
				kdebug("share mmap");
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & BDI_CAP_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error_just_free;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	kleave(" = %lx", result);
	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	if (vma->vm_flags & VM_EXECUTABLE)
		removed_exe_file_vma(vma->vm_mm);
	kmem_cache_free(vm_area_cachep, vma);
	kleave(" = %d", ret);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;

error_getting_region:
	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;
}
EXPORT_SYMBOL(do_mmap_pgoff);
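
/*
 * A hypothetical in-kernel caller, for illustration - a loader creating an
 * anonymous writable region might do something like:
 *
 *	down_write(&current->mm->mmap_sem);
 *	addr = do_mmap_pgoff(NULL, 0, size, PROT_READ | PROT_WRITE,
 *			     MAP_PRIVATE | MAP_ANONYMOUS, 0);
 *	up_write(&current->mm->mmap_sem);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;	// error code encoded in the value
 *
 * The address hint is ignored on NOMMU; the region lands wherever the page
 * allocator finds room.
 */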

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return retval;
}
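
/*
 * From userspace this is reached through the mmap(2) wrapper; an
 * illustrative anonymous mapping, which skips the fd lookup above:
 *
 *	void *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (p == MAP_FAILED)
 *		perror("mmap");
 *
 * Any address hint supplied is ignored on NOMMU.
 */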

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */
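
/*
 * The legacy entry point above takes a byte offset rather than a page
 * offset, hence the alignment check before converting: with 4KB pages, for
 * example, offset 8192 becomes pgoff 2, while offset 4100 is rejected with
 * -EINVAL.
 */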

/*
 * split a vma into two pieces at address 'addr'; a new vma is allocated
 * either for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	kenter("");

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}
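
/*
 * A worked example, assuming 4KB pages: splitting an anonymous VMA covering
 * 0x1000-0x5000 at addr 0x3000 with new_below set gives
 *
 *	new: vm_start 0x1000, vm_end 0x3000 (vm_top also 0x3000)
 *	vma: vm_start 0x3000, vm_end 0x5000, vm_pgoff advanced by 2
 *
 * and both halves end up with their own vm_region in the region tree.
 */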

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	kenter("");

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	kenter(",%lx,%zx", start, len);

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit = 0;
		if (limit < 5) {
			printk(KERN_WARNING
			       "munmap of memory not mmapped by process %d"
			       " (%s): 0x%lx-0x%lx\n",
			       current->pid, current->comm,
			       start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start) {
				kleave(" = -EINVAL [miss]");
				return -EINVAL;
			}
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		kleave(" = -EINVAL [split file]");
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end) {
			kleave(" = -EINVAL [superset]");
			return -EINVAL;
		}
		if (start & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned start]");
			return -EINVAL;
		}
		if (end != vma->vm_end && end & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned split]");
			return -EINVAL;
		}
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0) {
				kleave(" = %d [split]", ret);
				return ret;
			}
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	kleave(" = 0");
	return 0;
}
EXPORT_SYMBOL(do_munmap);
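
/*
 * Illustrative outcomes of the rules above for an anonymous mapping at
 * 0x1000-0x5000, assuming 4KB pages:
 *
 *	do_munmap(mm, 0x1000, 0x4000) -> unmaps the whole VMA
 *	do_munmap(mm, 0x1000, 0x2000) -> shrinks it to 0x3000-0x5000
 *	do_munmap(mm, 0x2000, 0x1000) -> splits, then frees the middle
 *
 * A file-backed VMA can only be unmapped whole; partial unmaps return
 * -EINVAL.
 */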

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	kenter("");

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}

	kleave("");
}

unsigned long do_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (addr & ~PAGE_MASK)
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}
EXPORT_SYMBOL(do_mremap);
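
/*
 * What the checks above permit, illustrated for a private anonymous mapping
 * of 20KB (the backing region is normally sized to fit exactly, so growing
 * fails):
 *
 *	mremap(addr, 20K, 8K, 0)  -> OK, shrinks in place
 *	mremap(addr, 20K, 40K, 0) -> -ENOMEM, exceeds the backing region
 *	mremap(addr, 20K, 8K, MREMAP_FIXED, new) -> -EINVAL unless
 *	                                            new == addr
 *	mremap() on a VM_MAYSHARE mapping -> -EPERM
 *
 * The data never moves; only vm_end is adjusted.
 */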

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int foll_flags)
{
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);
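
/*
 * With no MMU there is no way to give userspace a virtual view at a
 * different address, so only the identity mapping can work.  A hypothetical
 * driver mapping its device memory at physical address 'phys' would only
 * succeed if the VMA already sits there:
 *
 *	err = remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
 *			      size, vma->vm_page_prot);
 *	// -EINVAL unless vma->vm_start == phys
 */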

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += nr_swap_pages;

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages alone; they are not available
		 * for anonymous use.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	if (mm)
		allowed -= mm->total_vm / 32;

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;

error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
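
/*
 * A worked example of the strict-overcommit arithmetic above: with
 * totalram_pages = 16384 (64MB of 4KB pages), the default
 * sysctl_overcommit_ratio of 50, no swap and a non-root caller,
 *
 *	allowed  = 16384 * 50 / 100 = 8192
 *	allowed -= 8192 / 32 (root's 3%) -> 7936
 *
 * so commitments beyond 7936 pages (~31MB) fail with -ENOMEM however much
 * memory is actually free.
 */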

int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);
| David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1963 |  | 
static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					 (void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);

	return len;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @write:	whether the access is a write
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
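
/*
 * Minimal usage sketch (hypothetical, so left unbuilt): read a few bytes
 * from a target mm into a kernel buffer.  The return value is the number
 * of bytes actually transferred; the caller, as noted above, must already
 * hold a reference on the mm (e.g. from get_task_mm()).  The helper name
 * example_read_remote() is invented for illustration.
 */
#if 0
static int example_read_remote(struct mm_struct *mm, unsigned long addr)
{
	char kbuf[16];
	int copied;

	copied = access_remote_vm(mm, addr, kbuf, sizeof(kbuf), 0);
	return copied > 0 ? 0 : -EIO;
}
#endif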

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	len = __access_remote_vm(tsk, mm, addr, buf, len, write);

	mmput(mm);
	return len;
}
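
/*
 * Usage sketch (not built): this mirrors how generic_ptrace_peekdata()
 * reads one word from a tracee via access_process_vm().  The helper name
 * example_peek_word() is hypothetical; the call and its return convention
 * (bytes copied) are as defined above.
 */
#if 0
static int example_peek_word(struct task_struct *child,
			     unsigned long addr, unsigned long *val)
{
	int copied;

	copied = access_process_vm(child, addr, val, sizeof(*val), 0);
	return copied == sizeof(*val) ? 0 : -EIO;
}
#endif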

/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken and then shrink the
 * vm_regions that extend beyond the new size so that do_mmap_pgoff() doesn't
 * automatically grant mappings that are too large.
 */
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	mutex_lock(&inode->i_mapping->i_mmap_mutex);

	/* search for VMAs that fall within the dead zone */
	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
			      low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
	}

	/* reduce any regions that overlap the dead zone - if in existence,
	 * these will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */
	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
			      0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
	up_write(&nommu_region_sem);
	return 0;
}
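
/*
 * Usage sketch (hypothetical, not built): a NOMMU filesystem's shrinking
 * truncate path would call nommu_shrink_inode_mappings() before dropping
 * the size, roughly as below.  example_fs_shrink() is an invented name;
 * truncate_setsize() is a real helper from this era, though individual
 * filesystems differ in how they sequence the size change, and the
 * loff_t-to-size_t conversion is glossed over here.
 */
#if 0
static int example_fs_shrink(struct inode *inode, loff_t newsize)
{
	int ret = 0;

	if (newsize < inode->i_size)
		/* refuse if a shared mapping pins the dead zone */
		ret = nommu_shrink_inode_mappings(inode, inode->i_size,
						  newsize);
	if (ret == 0)
		truncate_setsize(inode, newsize);
	return ret;
}
#endif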