/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/sched/sysctl.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

/*
 * The global memory commitment made in the system can be a metric
 * that drives ballooning decisions when Linux is hosted as a guest.
 * On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across the virtual machines it hosts. Several
 * metrics drive this policy engine, including the guest-reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);
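/*
 * Illustrative sketch only (not taken from this file): a guest balloon
 * driver could sample this metric and forward it to a host policy engine.
 * The report_to_host() helper named here is hypothetical.
 *
 *	unsigned long committed_pages = vm_memory_committed();
 *	report_to_host(committed_pages << PAGE_SHIFT);
 */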
EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}
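/*
 * Usage sketch for kobjsize() above (illustrative; the requested size is an
 * assumption): the value returned reflects the backing allocation, so it may
 * exceed what was asked of kmalloc().
 *
 *	void *buf = kmalloc(100, GFP_KERNEL);
 *	if (buf)
 *		pr_info("asked for 100 bytes, backed by %u\n", kobjsize(buf));
 *	kfree(buf);
 */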
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count
 *   of a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
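/*
 * Hedged usage sketch for get_user_pages() (addr is an assumed user address,
 * not something defined in this file): pin one writable page of the current
 * task's address space; each pinned page is later released with
 * page_cache_release().
 *
 *	struct page *page;
 *	long n;
 *
 *	down_read(&current->mm->mmap_sem);
 *	n = get_user_pages(current, current->mm, addr & PAGE_MASK, 1,
 *			   1, 0, &page, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (n == 1)
 *		page_cache_release(page);
 */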
/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	       unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);
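/*
 * Usage sketch for follow_pfn() (illustrative; vma and addr are assumed to
 * come from the caller): only VM_IO/VM_PFNMAP mappings are accepted.
 *
 *	unsigned long pfn;
 *
 *	if (follow_pfn(vma, addr, &pfn) == 0)
 *		pr_debug("addr maps to pfn %lx\n", pfn);
 */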
LIST_HEAD(vmap_area_list);

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}
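/*
 * Sketch of the nommu behaviour of vread()/vwrite() above (illustrative;
 * vm_addr is an assumed pointer into a vmalloc() area): with no MMU these
 * degenerate to overflow-checked memcpy()s, so reading a vmalloc() area
 * back is a straight copy.
 *
 *	char tmp[64];
 *	long n = vread(tmp, vm_addr, sizeof(tmp));
 */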
/**
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

/**
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  In this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}
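/*
 * Semantics sketch for the nommu brk() above (illustrative userspace code;
 * the 4096-byte increment is an assumption): the heap may only move within
 * the start_brk..end_brk window reserved when the binary was set up, so a
 * grow request succeeds only if that region has spare room.
 *
 *	char *cur = sbrk(0);
 *	if (sbrk(4096) != (void *)-1)
 *		pr_info-equivalent: 4096 more bytes usable at cur
 */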
/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(unlikely(last->vm_end <= last->vm_start));
	BUG_ON(unlikely(last->vm_top < last->vm_end));

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(unlikely(region->vm_end <= region->vm_start));
		BUG_ON(unlikely(region->vm_top < region->vm_end));
		BUG_ON(unlikely(region->vm_start < last->vm_top));

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		kdebug("- free %lx", from);
		atomic_long_dec(&mmap_pages_allocated);
		if (page_count(page) != 1)
			kdebug("free page %p: refcount not one: %d",
			       page, page_count(page));
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	kenter("%p{%d}", region, region->vm_usage);

	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY) {
			kdebug("free series");
			free_page_series(region->vm_start, region->vm_top);
		}
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}
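/*
 * Reference-lifecycle sketch for the region helpers above (illustrative
 * only, not a call sequence lifted from this file): a sharer bumps
 * vm_usage under the region semaphore, and the final put tears the region
 * down, removing it from nommu_region_tree and freeing any VM_MAPPED_COPY
 * pages.
 *
 *	down_write(&nommu_region_sem);
 *	region->vm_usage++;
 *	up_write(&nommu_region_sem);
 *	...
 *	put_nommu_region(region);
 */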
/*
 * update protection on a vma
 */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;
	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	kenter(",%p", vma);

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;

	kenter("%p", vma);

	protect_vma(vma, 0);

	mm->map_count--;
	if (mm->mmap_cache == vma)
		mm->mmap_cache = NULL;

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	kenter("%p", vma);
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = ACCESS_ONCE(mm->mmap_cache);
	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);
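/*
 * Usage sketch for find_vma() (illustrative; mm and addr are assumed to come
 * from the caller): hold mmap_sem at least for reading, and check that addr
 * really falls inside the returned VMA, since on MMU kernels find_vma() may
 * return the next VMA above addr.
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		...addr lies within vma...
 *	up_read(&mm->mmap_sem);
 */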
|  | 845 |  | 
|  | 846 | /* | 
| David Howells | 930e652 | 2006-09-27 01:50:22 -0700 | [diff] [blame] | 847 | * find a VMA | 
|  | 848 | * - we don't extend stack VMAs under NOMMU conditions | 
|  | 849 | */ | 
|  | 850 | struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) | 
|  | 851 | { | 
| David Howells | 7561e8c | 2010-03-25 16:48:38 +0000 | [diff] [blame] | 852 | return find_vma(mm, addr); | 
| David Howells | 930e652 | 2006-09-27 01:50:22 -0700 | [diff] [blame] | 853 | } | 
|  | 854 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 855 | /* | 
|  | 856 | * expand a stack to a given address | 
|  | 857 | * - not supported under NOMMU conditions | 
|  | 858 | */ | 
| Greg Ungerer | 57c8f63 | 2007-07-15 23:38:28 -0700 | [diff] [blame] | 859 | int expand_stack(struct vm_area_struct *vma, unsigned long address) | 
|  | 860 | { | 
|  | 861 | return -ENOMEM; | 
|  | 862 | } | 
|  | 863 |  | 
| David Howells | 930e652 | 2006-09-27 01:50:22 -0700 | [diff] [blame] | 864 | /* | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 865 | * look up the first VMA exactly that exactly matches addr | 
|  | 866 | * - should be called with mm->mmap_sem at least held readlocked | 
|  | 867 | */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 868 | static struct vm_area_struct *find_vma_exact(struct mm_struct *mm, | 
|  | 869 | unsigned long addr, | 
|  | 870 | unsigned long len) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 871 | { | 
|  | 872 | struct vm_area_struct *vma; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 873 | unsigned long end = addr + len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 874 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 875 | /* check the cache first */ | 
|  | 876 | vma = mm->mmap_cache; | 
|  | 877 | if (vma && vma->vm_start == addr && vma->vm_end == end) | 
|  | 878 | return vma; | 
|  | 879 |  | 
| Namhyung Kim | e922c4c | 2011-05-24 17:11:24 -0700 | [diff] [blame] | 880 | /* trawl the list (there may be multiple mappings in which addr | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 881 | * resides) */ | 
| Namhyung Kim | e922c4c | 2011-05-24 17:11:24 -0700 | [diff] [blame] | 882 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 883 | if (vma->vm_start < addr) | 
|  | 884 | continue; | 
|  | 885 | if (vma->vm_start > addr) | 
|  | 886 | return NULL; | 
|  | 887 | if (vma->vm_end == end) { | 
|  | 888 | mm->mmap_cache = vma; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 889 | return vma; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 890 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 891 | } | 
|  | 892 |  | 
|  | 893 | return NULL; | 
|  | 894 | } | 
|  | 895 |  | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 896 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 897 | * determine whether a mapping should be permitted and, if so, what sort of | 
|  | 898 | * mapping we're capable of supporting | 
|  | 899 | */ | 
|  | 900 | static int validate_mmap_request(struct file *file, | 
|  | 901 | unsigned long addr, | 
|  | 902 | unsigned long len, | 
|  | 903 | unsigned long prot, | 
|  | 904 | unsigned long flags, | 
|  | 905 | unsigned long pgoff, | 
|  | 906 | unsigned long *_capabilities) | 
|  | 907 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 908 | unsigned long capabilities, rlen; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 909 | int ret; | 
|  | 910 |  | 
|  | 911 | /* do the simple checks first */ | 
| David Howells | 06aab5a | 2009-09-24 12:33:48 +0100 | [diff] [blame] | 912 | if (flags & MAP_FIXED) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 913 | printk(KERN_DEBUG | 
|  | 914 | "%d: Can't do fixed-address/overlay mmap of RAM\n", | 
|  | 915 | current->pid); | 
|  | 916 | return -EINVAL; | 
|  | 917 | } | 
|  | 918 |  | 
|  | 919 | if ((flags & MAP_TYPE) != MAP_PRIVATE && | 
|  | 920 | (flags & MAP_TYPE) != MAP_SHARED) | 
|  | 921 | return -EINVAL; | 
|  | 922 |  | 
| Mike Frysinger | f81cff0 | 2006-12-06 12:02:59 +1000 | [diff] [blame] | 923 | if (!len) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 924 | return -EINVAL; | 
|  | 925 |  | 
| Mike Frysinger | f81cff0 | 2006-12-06 12:02:59 +1000 | [diff] [blame] | 926 | /* Careful about overflows.. */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 927 | rlen = PAGE_ALIGN(len); | 
|  | 928 | if (!rlen || rlen > TASK_SIZE) | 
| Mike Frysinger | f81cff0 | 2006-12-06 12:02:59 +1000 | [diff] [blame] | 929 | return -ENOMEM; | 
|  | 930 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 931 | /* offset overflow? */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 932 | if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff) | 
| Mike Frysinger | f81cff0 | 2006-12-06 12:02:59 +1000 | [diff] [blame] | 933 | return -EOVERFLOW; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 934 |  | 
|  | 935 | if (file) { | 
|  | 936 | /* validate file mapping requests */ | 
|  | 937 | struct address_space *mapping; | 
|  | 938 |  | 
|  | 939 | /* files must support mmap */ | 
|  | 940 | if (!file->f_op || !file->f_op->mmap) | 
|  | 941 | return -ENODEV; | 
|  | 942 |  | 
|  | 943 | /* work out if what we've got could possibly be shared | 
|  | 944 | * - we support chardevs that provide their own "memory" | 
|  | 945 | * - we support files/blockdevs that are memory backed | 
|  | 946 | */ | 
|  | 947 | mapping = file->f_mapping; | 
|  | 948 | if (!mapping) | 
| Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 949 | mapping = file_inode(file)->i_mapping; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 950 |  | 
|  | 951 | capabilities = 0; | 
|  | 952 | if (mapping && mapping->backing_dev_info) | 
|  | 953 | capabilities = mapping->backing_dev_info->capabilities; | 
|  | 954 |  | 
|  | 955 | if (!capabilities) { | 
|  | 956 | /* no explicit capabilities set, so assume some | 
|  | 957 | * defaults */ | 
| Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 958 | switch (file_inode(file)->i_mode & S_IFMT) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 959 | case S_IFREG: | 
|  | 960 | case S_IFBLK: | 
|  | 961 | capabilities = BDI_CAP_MAP_COPY; | 
|  | 962 | break; | 
|  | 963 |  | 
|  | 964 | case S_IFCHR: | 
|  | 965 | capabilities = | 
|  | 966 | BDI_CAP_MAP_DIRECT | | 
|  | 967 | BDI_CAP_READ_MAP | | 
|  | 968 | BDI_CAP_WRITE_MAP; | 
|  | 969 | break; | 
|  | 970 |  | 
|  | 971 | default: | 
|  | 972 | return -EINVAL; | 
|  | 973 | } | 
|  | 974 | } | 
|  | 975 |  | 
|  | 976 | /* eliminate any capabilities that we can't support on this | 
|  | 977 | * device */ | 
|  | 978 | if (!file->f_op->get_unmapped_area) | 
|  | 979 | capabilities &= ~BDI_CAP_MAP_DIRECT; | 
|  | 980 | if (!file->f_op->read) | 
|  | 981 | capabilities &= ~BDI_CAP_MAP_COPY; | 
|  | 982 |  | 
| Graff Yang | 28d7a6a | 2009-08-18 14:11:17 -0700 | [diff] [blame] | 983 | /* The file shall have been opened with read permission. */ | 
|  | 984 | if (!(file->f_mode & FMODE_READ)) | 
|  | 985 | return -EACCES; | 
|  | 986 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 987 | if (flags & MAP_SHARED) { | 
|  | 988 | /* do checks for writing, appending and locking */ | 
|  | 989 | if ((prot & PROT_WRITE) && | 
|  | 990 | !(file->f_mode & FMODE_WRITE)) | 
|  | 991 | return -EACCES; | 
|  | 992 |  | 
| Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 993 | if (IS_APPEND(file_inode(file)) && | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 994 | (file->f_mode & FMODE_WRITE)) | 
|  | 995 | return -EACCES; | 
|  | 996 |  | 
| Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 997 | if (locks_verify_locked(file_inode(file))) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 998 | return -EAGAIN; | 
|  | 999 |  | 
|  | 1000 | if (!(capabilities & BDI_CAP_MAP_DIRECT)) | 
|  | 1001 | return -ENODEV; | 
|  | 1002 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1003 | /* we mustn't privatise shared mappings */ | 
|  | 1004 | capabilities &= ~BDI_CAP_MAP_COPY; | 
|  | 1005 | } | 
|  | 1006 | else { | 
|  | 1007 | /* we're going to read the file into private memory we | 
|  | 1008 | * allocate */ | 
|  | 1009 | if (!(capabilities & BDI_CAP_MAP_COPY)) | 
|  | 1010 | return -ENODEV; | 
|  | 1011 |  | 
|  | 1012 | /* we don't permit a private writable mapping to be | 
|  | 1013 | * shared with the backing device */ | 
|  | 1014 | if (prot & PROT_WRITE) | 
|  | 1015 | capabilities &= ~BDI_CAP_MAP_DIRECT; | 
|  | 1016 | } | 
|  | 1017 |  | 
| Bernd Schmidt | 3c7b204 | 2010-05-25 23:43:00 -0700 | [diff] [blame] | 1018 | if (capabilities & BDI_CAP_MAP_DIRECT) { | 
|  | 1019 | if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  || | 
|  | 1020 | ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) || | 
|  | 1021 | ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP)) | 
|  | 1022 | ) { | 
|  | 1023 | capabilities &= ~BDI_CAP_MAP_DIRECT; | 
|  | 1024 | if (flags & MAP_SHARED) { | 
|  | 1025 | printk(KERN_WARNING | 
|  | 1026 | "MAP_SHARED not completely supported on !MMU\n"); | 
|  | 1027 | return -EINVAL; | 
|  | 1028 | } | 
|  | 1029 | } | 
|  | 1030 | } | 
|  | 1031 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1032 | /* handle executable mappings and implied executable | 
|  | 1033 | * mappings */ | 
| Josef Sipek | e9536ae | 2006-12-08 02:37:21 -0800 | [diff] [blame] | 1034 | if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1035 | if (prot & PROT_EXEC) | 
|  | 1036 | return -EPERM; | 
|  | 1037 | } | 
|  | 1038 | else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) { | 
|  | 1039 | /* handle implication of PROT_EXEC by PROT_READ */ | 
|  | 1040 | if (current->personality & READ_IMPLIES_EXEC) { | 
|  | 1041 | if (capabilities & BDI_CAP_EXEC_MAP) | 
|  | 1042 | prot |= PROT_EXEC; | 
|  | 1043 | } | 
|  | 1044 | } | 
|  | 1045 | else if ((prot & PROT_READ) && | 
|  | 1046 | (prot & PROT_EXEC) && | 
|  | 1047 | !(capabilities & BDI_CAP_EXEC_MAP) | 
|  | 1048 | ) { | 
|  | 1049 | /* backing file is not executable, try to copy */ | 
|  | 1050 | capabilities &= ~BDI_CAP_MAP_DIRECT; | 
|  | 1051 | } | 
|  | 1052 | } | 
|  | 1053 | else { | 
|  | 1054 | /* anonymous mappings are always memory backed and can be | 
|  | 1055 | * privately mapped | 
|  | 1056 | */ | 
|  | 1057 | capabilities = BDI_CAP_MAP_COPY; | 
|  | 1058 |  | 
|  | 1059 | /* handle PROT_EXEC implication by PROT_READ */ | 
|  | 1060 | if ((prot & PROT_READ) && | 
|  | 1061 | (current->personality & READ_IMPLIES_EXEC)) | 
|  | 1062 | prot |= PROT_EXEC; | 
|  | 1063 | } | 
|  | 1064 |  | 
|  | 1065 | /* allow the security API to have its say */ | 
| Al Viro | e546785 | 2012-05-30 13:30:51 -0400 | [diff] [blame] | 1066 | ret = security_mmap_addr(addr); | 
|  | 1067 | if (ret < 0) | 
|  | 1068 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1069 |  | 
|  | 1070 | /* looks okay */ | 
|  | 1071 | *_capabilities = capabilities; | 
|  | 1072 | return 0; | 
|  | 1073 | } | 
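|  |  |  | 
|  |  | /* Illustrative sketch only (not kernel code): a userspace probe of the | 
|  |  |  * rules above, built for a nommu target.  "data.bin" is a hypothetical | 
|  |  |  * test file on a plain block-backed filesystem whose BDI offers no | 
|  |  |  * BDI_CAP_MAP_DIRECT; the errno values are those the checks suggest. */ | 
|  |  | #if 0 | 
|  |  | #include <fcntl.h> | 
|  |  | #include <stdio.h> | 
|  |  | #include <sys/mman.h> | 
|  |  | #include <unistd.h> | 
|  |  |  | 
|  |  | int main(void) | 
|  |  | { | 
|  |  | 	int fd = open("data.bin", O_RDWR); | 
|  |  | 	if (fd < 0) | 
|  |  | 		return 1; | 
|  |  |  | 
|  |  | 	/* BDI_CAP_MAP_COPY path: a private copy is made, so this works */ | 
|  |  | 	void *priv = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0); | 
|  |  |  | 
|  |  | 	/* MAP_SHARED requires BDI_CAP_MAP_DIRECT, which this file lacks, | 
|  |  | 	 * so expect MAP_FAILED with errno == ENODEV */ | 
|  |  | 	void *shr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); | 
|  |  |  | 
|  |  | 	printf("private=%p shared=%p\n", priv, shr); | 
|  |  | 	close(fd); | 
|  |  | 	return 0; | 
|  |  | } | 
|  |  | #endif | 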
|  | 1074 |  | 
|  | 1075 | /* | 
|  | 1076 | * we've determined that we can make the mapping, now translate what we | 
|  | 1077 | * now know into VMA flags | 
|  | 1078 | */ | 
|  | 1079 | static unsigned long determine_vm_flags(struct file *file, | 
|  | 1080 | unsigned long prot, | 
|  | 1081 | unsigned long flags, | 
|  | 1082 | unsigned long capabilities) | 
|  | 1083 | { | 
|  | 1084 | unsigned long vm_flags; | 
|  | 1085 |  | 
|  | 1086 | vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1087 | /* vm_flags |= mm->def_flags; */ | 
|  | 1088 |  | 
|  | 1089 | if (!(capabilities & BDI_CAP_MAP_DIRECT)) { | 
|  | 1090 | /* attempt to share read-only copies of mapped file chunks */ | 
| Bernd Schmidt | 3c7b204 | 2010-05-25 23:43:00 -0700 | [diff] [blame] | 1091 | vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1092 | if (file && !(prot & PROT_WRITE)) | 
|  | 1093 | vm_flags |= VM_MAYSHARE; | 
| Bernd Schmidt | 3c7b204 | 2010-05-25 23:43:00 -0700 | [diff] [blame] | 1094 | } else { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1095 | /* overlay a shareable mapping on the backing device or inode | 
|  | 1096 | * if possible - used for chardevs, ramfs/tmpfs/shmfs and | 
|  | 1097 | * romfs/cramfs */ | 
| Bernd Schmidt | 3c7b204 | 2010-05-25 23:43:00 -0700 | [diff] [blame] | 1098 | vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1099 | if (flags & MAP_SHARED) | 
| Bernd Schmidt | 3c7b204 | 2010-05-25 23:43:00 -0700 | [diff] [blame] | 1100 | vm_flags |= VM_SHARED; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1101 | } | 
|  | 1102 |  | 
|  | 1103 | /* refuse to let anyone share private mappings with this process if | 
|  | 1104 | * it's being traced - otherwise breakpoints set in it may interfere | 
|  | 1105 | * with another untraced process | 
|  | 1106 | */ | 
| Tejun Heo | a288eec | 2011-06-17 16:50:37 +0200 | [diff] [blame] | 1107 | if ((flags & MAP_PRIVATE) && current->ptrace) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1108 | vm_flags &= ~VM_MAYSHARE; | 
|  | 1109 |  | 
|  | 1110 | return vm_flags; | 
|  | 1111 | } | 
|  | 1112 |  | 
|  | 1113 | /* | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1114 | * set up a shared mapping on a file (the driver or filesystem provides and | 
|  | 1115 | * pins the storage) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1116 | */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1117 | static int do_mmap_shared_file(struct vm_area_struct *vma) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1118 | { | 
|  | 1119 | int ret; | 
|  | 1120 |  | 
|  | 1121 | ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1122 | if (ret == 0) { | 
|  | 1123 | vma->vm_region->vm_top = vma->vm_region->vm_end; | 
| David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1124 | return 0; | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1125 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1126 | if (ret != -ENOSYS) | 
|  | 1127 | return ret; | 
|  | 1128 |  | 
| David Howells | 3fa3046 | 2010-03-23 13:35:21 -0700 | [diff] [blame] | 1129 | /* getting -ENOSYS indicates that direct mmap isn't possible (as | 
|  | 1130 | * opposed to tried but failed) so we can only give a suitable error as | 
|  | 1131 | * it's not possible to make a private copy if MAP_SHARED was given */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1132 | return -ENODEV; | 
|  | 1133 | } | 
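|  |  |  | 
|  |  | /* An illustrative driver-side counterpart (all names hypothetical): on | 
|  |  |  * nommu, a chardev that wants MAP_SHARED to work exports its own memory | 
|  |  |  * through get_unmapped_area(), and its mmap() then merely confirms the | 
|  |  |  * mapping or returns -ENOSYS to say direct mapping is impossible. */ | 
|  |  | #if 0 | 
|  |  | static unsigned long exdev_get_unmapped_area(struct file *file, | 
|  |  | 		unsigned long addr, unsigned long len, | 
|  |  | 		unsigned long pgoff, unsigned long flags) | 
|  |  | { | 
|  |  | 	if ((pgoff << PAGE_SHIFT) + len > EXDEV_SIZE)	/* hypothetical constant */ | 
|  |  | 		return -EINVAL; | 
|  |  | 	return EXDEV_BASE + (pgoff << PAGE_SHIFT);	/* hypothetical constant */ | 
|  |  | } | 
|  |  |  | 
|  |  | static int exdev_mmap(struct file *file, struct vm_area_struct *vma) | 
|  |  | { | 
|  |  | 	if (vma->vm_flags & VM_MAYSHARE) | 
|  |  | 		return 0;	/* region already placed by get_unmapped_area() */ | 
|  |  | 	return -ENOSYS;	/* tell the caller to fall back to a private copy */ | 
|  |  | } | 
|  |  | #endif | 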
|  | 1134 |  | 
|  | 1135 | /* | 
|  | 1136 | * set up a private mapping or an anonymous shared mapping | 
|  | 1137 | */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1138 | static int do_mmap_private(struct vm_area_struct *vma, | 
|  | 1139 | struct vm_region *region, | 
| David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1140 | unsigned long len, | 
|  | 1141 | unsigned long capabilities) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1142 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1143 | struct page *pages; | 
| Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1144 | unsigned long total, point, n; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1145 | void *base; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1146 | int ret, order; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1147 |  | 
|  | 1148 | /* invoke the file's mapping function so that it can keep track of | 
|  | 1149 | * shared mappings on devices or memory | 
|  | 1150 | * - VM_MAYSHARE will be set if it may attempt to share | 
|  | 1151 | */ | 
| David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1152 | if (capabilities & BDI_CAP_MAP_DIRECT) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1153 | ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1154 | if (ret == 0) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1155 | /* shouldn't return success if we're not sharing */ | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1156 | BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); | 
|  | 1157 | vma->vm_region->vm_top = vma->vm_region->vm_end; | 
| David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1158 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1159 | } | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1160 | if (ret != -ENOSYS) | 
|  | 1161 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1162 |  | 
|  | 1163 | /* getting an ENOSYS error indicates that direct mmap isn't | 
|  | 1164 | * possible (as opposed to tried but failed) so we'll try to | 
|  | 1165 | * make a private copy of the data and map that instead */ | 
|  | 1166 | } | 
|  | 1167 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1168 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1169 | /* allocate some memory to hold the mapping | 
|  | 1170 | * - the allocation comes from alloc_pages(), so it is page-aligned and | 
|  | 1171 | *   rounded up to a power-of-2 number of pages that may need trimming | 
|  | 1172 | */ | 
| Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1173 | order = get_order(len); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1174 | kdebug("alloc order %d for %lx", order, len); | 
|  | 1175 |  | 
|  | 1176 | pages = alloc_pages(GFP_KERNEL, order); | 
|  | 1177 | if (!pages) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1178 | goto enomem; | 
|  | 1179 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1180 | total = 1 << order; | 
| David Howells | 33e5d769 | 2009-04-02 16:56:32 -0700 | [diff] [blame] | 1181 | atomic_long_add(total, &mmap_pages_allocated); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1182 |  | 
| Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1183 | point = len >> PAGE_SHIFT; | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1184 |  | 
|  | 1185 | /* we allocated a power-of-2 sized page set, so we may want to trim off | 
|  | 1186 | * the excess */ | 
|  | 1187 | if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) { | 
|  | 1188 | while (total > point) { | 
|  | 1189 | order = ilog2(total - point); | 
|  | 1190 | n = 1 << order; | 
|  | 1191 | kdebug("shave %lu/%lu @%lu", n, total - point, total); | 
| David Howells | 33e5d769 | 2009-04-02 16:56:32 -0700 | [diff] [blame] | 1192 | atomic_long_sub(n, &mmap_pages_allocated); | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1193 | total -= n; | 
|  | 1194 | set_page_refcounted(pages + total); | 
|  | 1195 | __free_pages(pages + total, order); | 
|  | 1196 | } | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1197 | } | 
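|  |  | /* worked example (illustrative): len = 5 pages gives order = 3 and | 
|  |  |  * total = 8; with point = 5 the loop frees a 2-page chunk | 
|  |  |  * (ilog2(3) = 1) and then a single page, leaving exactly the five | 
|  |  |  * pages that back the mapping */ | 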
|  | 1198 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1199 | for (point = 1; point < total; point++) | 
|  | 1200 | set_page_refcounted(&pages[point]); | 
|  | 1201 |  | 
|  | 1202 | base = page_address(pages); | 
|  | 1203 | region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; | 
|  | 1204 | region->vm_start = (unsigned long) base; | 
| Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1205 | region->vm_end   = region->vm_start + len; | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1206 | region->vm_top   = region->vm_start + (total << PAGE_SHIFT); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1207 |  | 
|  | 1208 | vma->vm_start = region->vm_start; | 
|  | 1209 | vma->vm_end   = region->vm_start + len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1210 |  | 
|  | 1211 | if (vma->vm_file) { | 
|  | 1212 | /* read the contents of a file into the copy */ | 
|  | 1213 | mm_segment_t old_fs; | 
|  | 1214 | loff_t fpos; | 
|  | 1215 |  | 
|  | 1216 | fpos = vma->vm_pgoff; | 
|  | 1217 | fpos <<= PAGE_SHIFT; | 
|  | 1218 |  | 
|  | 1219 | old_fs = get_fs(); | 
|  | 1220 | set_fs(KERNEL_DS); | 
| Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1221 | ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1222 | set_fs(old_fs); | 
|  | 1223 |  | 
|  | 1224 | if (ret < 0) | 
|  | 1225 | goto error_free; | 
|  | 1226 |  | 
|  | 1227 | /* clear the last little bit */ | 
| Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1228 | if (ret < len) | 
|  | 1229 | memset(base + ret, 0, len - ret); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1231 | } | 
|  | 1232 |  | 
|  | 1233 | return 0; | 
|  | 1234 |  | 
|  | 1235 | error_free: | 
| Namhyung Kim | 7223bb4 | 2011-05-24 17:11:26 -0700 | [diff] [blame] | 1236 | free_page_series(region->vm_start, region->vm_top); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1237 | region->vm_start = vma->vm_start = 0; | 
|  | 1238 | region->vm_end   = vma->vm_end = 0; | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1239 | region->vm_top   = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1240 | return ret; | 
|  | 1241 |  | 
|  | 1242 | enomem: | 
| Greg Ungerer | 05ae6fa | 2009-01-13 17:30:22 +1000 | [diff] [blame] | 1243 | printk(KERN_WARNING "Allocation of length %lu from process %d (%s) failed\n", | 
|  | 1244 | len, current->pid, current->comm); | 
| David Rientjes | 7bf02ea | 2011-05-24 17:11:16 -0700 | [diff] [blame] | 1245 | show_free_areas(0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1246 | return -ENOMEM; | 
|  | 1247 | } | 
|  | 1248 |  | 
|  | 1249 | /* | 
|  | 1250 | * handle mapping creation for uClinux | 
|  | 1251 | */ | 
| Al Viro | e3fc629 | 2012-05-30 20:08:42 -0400 | [diff] [blame] | 1252 | unsigned long do_mmap_pgoff(struct file *file, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1253 | unsigned long addr, | 
|  | 1254 | unsigned long len, | 
|  | 1255 | unsigned long prot, | 
|  | 1256 | unsigned long flags, | 
| Michel Lespinasse | bebeb3d | 2013-02-22 16:32:37 -0800 | [diff] [blame] | 1257 | unsigned long pgoff, | 
| Michel Lespinasse | 41badc1 | 2013-02-22 16:32:47 -0800 | [diff] [blame] | 1258 | unsigned long *populate) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1259 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1260 | struct vm_area_struct *vma; | 
|  | 1261 | struct vm_region *region; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1262 | struct rb_node *rb; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1263 | unsigned long capabilities, vm_flags, result; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1264 | int ret; | 
|  | 1265 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1266 | kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff); | 
|  | 1267 |  | 
| Michel Lespinasse | 41badc1 | 2013-02-22 16:32:47 -0800 | [diff] [blame] | 1268 | *populate = 0; | 
| Michel Lespinasse | bebeb3d | 2013-02-22 16:32:37 -0800 | [diff] [blame] | 1269 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1270 | /* decide whether we should attempt the mapping, and if so what sort of | 
|  | 1271 | * mapping */ | 
|  | 1272 | ret = validate_mmap_request(file, addr, len, prot, flags, pgoff, | 
|  | 1273 | &capabilities); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1274 | if (ret < 0) { | 
|  | 1275 | kleave(" = %d [val]", ret); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1276 | return ret; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1277 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1278 |  | 
| David Howells | 06aab5a | 2009-09-24 12:33:48 +0100 | [diff] [blame] | 1279 | /* we ignore the address hint */ | 
|  | 1280 | addr = 0; | 
| Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1281 | len = PAGE_ALIGN(len); | 
| David Howells | 06aab5a | 2009-09-24 12:33:48 +0100 | [diff] [blame] | 1282 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1283 | /* we've determined that we can make the mapping, now translate what we | 
|  | 1284 | * now know into VMA flags */ | 
|  | 1285 | vm_flags = determine_vm_flags(file, prot, flags, capabilities); | 
|  | 1286 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1287 | /* we're going to need to record the mapping */ | 
|  | 1288 | region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); | 
|  | 1289 | if (!region) | 
|  | 1290 | goto error_getting_region; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1291 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1292 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | 
|  | 1293 | if (!vma) | 
|  | 1294 | goto error_getting_vma; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1295 |  | 
| David Howells | 1e2ae59 | 2010-01-15 17:01:33 -0800 | [diff] [blame] | 1296 | region->vm_usage = 1; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1297 | region->vm_flags = vm_flags; | 
|  | 1298 | region->vm_pgoff = pgoff; | 
|  | 1299 |  | 
| Rik van Riel | 5beb493 | 2010-03-05 13:42:07 -0800 | [diff] [blame] | 1300 | INIT_LIST_HEAD(&vma->anon_vma_chain); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1301 | vma->vm_flags = vm_flags; | 
|  | 1302 | vma->vm_pgoff = pgoff; | 
|  | 1303 |  | 
|  | 1304 | if (file) { | 
| Al Viro | cb0942b | 2012-08-27 14:48:26 -0400 | [diff] [blame] | 1305 | region->vm_file = get_file(file); | 
|  | 1306 | vma->vm_file = get_file(file); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1307 | } | 
|  | 1308 |  | 
|  | 1309 | down_write(&nommu_region_sem); | 
|  | 1310 |  | 
|  | 1311 | /* if we want to share, we need to check for regions created by other | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1312 | * mmap() calls that overlap with our proposed mapping | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1313 | * - we can only share with a superset match on most regular files | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1314 | * - shared mappings on character devices and memory backed files are | 
|  | 1315 | *   permitted to overlap inexactly as far as we are concerned for in | 
|  | 1316 | *   these cases, sharing is handled in the driver or filesystem rather | 
|  | 1317 | *   than here | 
|  | 1318 | */ | 
|  | 1319 | if (vm_flags & VM_MAYSHARE) { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1320 | struct vm_region *pregion; | 
|  | 1321 | unsigned long pglen, rpglen, pgend, rpgend, start; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1322 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1323 | pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 
|  | 1324 | pgend = pgoff + pglen; | 
| David Howells | 165b239 | 2007-03-22 00:11:24 -0800 | [diff] [blame] | 1325 |  | 
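|  |  | /* worked example (illustrative): a request with pgoff 2 and len 3 | 
|  |  |  * pages gives pglen 3 and pgend 5; an existing 8-page region with | 
|  |  |  * vm_pgoff 0 has rpgend 8, so pgoff >= vm_pgoff and pgend <= rpgend | 
|  |  |  * both hold in the loop below and the request is a shareable subset */ | 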
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1326 | for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) { | 
|  | 1327 | pregion = rb_entry(rb, struct vm_region, vm_rb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1328 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1329 | if (!(pregion->vm_flags & VM_MAYSHARE)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1330 | continue; | 
|  | 1331 |  | 
|  | 1332 | /* search for overlapping mappings on the same file */ | 
| Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 1333 | if (file_inode(pregion->vm_file) != | 
|  | 1334 | file_inode(file)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1335 | continue; | 
|  | 1336 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1337 | if (pregion->vm_pgoff >= pgend) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1338 | continue; | 
|  | 1339 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1340 | rpglen = pregion->vm_end - pregion->vm_start; | 
|  | 1341 | rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT; | 
|  | 1342 | rpgend = pregion->vm_pgoff + rpglen; | 
|  | 1343 | if (pgoff >= rpgend) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1344 | continue; | 
|  | 1345 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1346 | /* handle inexactly overlapping matches between | 
|  | 1347 | * mappings */ | 
|  | 1348 | if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && | 
|  | 1349 | !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { | 
|  | 1350 | /* new mapping is not a subset of the region */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1351 | if (!(capabilities & BDI_CAP_MAP_DIRECT)) | 
|  | 1352 | goto sharing_violation; | 
|  | 1353 | continue; | 
|  | 1354 | } | 
|  | 1355 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1356 | /* we've found a region we can share */ | 
| David Howells | 1e2ae59 | 2010-01-15 17:01:33 -0800 | [diff] [blame] | 1357 | pregion->vm_usage++; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1358 | vma->vm_region = pregion; | 
|  | 1359 | start = pregion->vm_start; | 
|  | 1360 | start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; | 
|  | 1361 | vma->vm_start = start; | 
|  | 1362 | vma->vm_end = start + len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1363 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1364 | if (pregion->vm_flags & VM_MAPPED_COPY) { | 
|  | 1365 | kdebug("share copy"); | 
|  | 1366 | vma->vm_flags |= VM_MAPPED_COPY; | 
|  | 1367 | } else { | 
|  | 1368 | kdebug("share mmap"); | 
|  | 1369 | ret = do_mmap_shared_file(vma); | 
|  | 1370 | if (ret < 0) { | 
|  | 1371 | vma->vm_region = NULL; | 
|  | 1372 | vma->vm_start = 0; | 
|  | 1373 | vma->vm_end = 0; | 
| David Howells | 1e2ae59 | 2010-01-15 17:01:33 -0800 | [diff] [blame] | 1374 | pregion->vm_usage--; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1375 | pregion = NULL; | 
|  | 1376 | goto error_just_free; | 
|  | 1377 | } | 
|  | 1378 | } | 
|  | 1379 | fput(region->vm_file); | 
|  | 1380 | kmem_cache_free(vm_region_jar, region); | 
|  | 1381 | region = pregion; | 
|  | 1382 | result = start; | 
|  | 1383 | goto share; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1384 | } | 
|  | 1385 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1386 | /* obtain the address at which to make a shared mapping | 
|  | 1387 | * - this is the hook for quasi-memory character devices to | 
|  | 1388 | *   tell us the location of a shared mapping | 
|  | 1389 | */ | 
| David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1390 | if (capabilities & BDI_CAP_MAP_DIRECT) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1391 | addr = file->f_op->get_unmapped_area(file, addr, len, | 
|  | 1392 | pgoff, flags); | 
| Namhyung Kim | bb005a5 | 2011-05-24 17:11:27 -0700 | [diff] [blame] | 1393 | if (IS_ERR_VALUE(addr)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1394 | ret = addr; | 
| Namhyung Kim | bb005a5 | 2011-05-24 17:11:27 -0700 | [diff] [blame] | 1395 | if (ret != -ENOSYS) | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1396 | goto error_just_free; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1397 |  | 
|  | 1398 | /* the driver refused to tell us where to site | 
|  | 1399 | * the mapping so we'll have to attempt to copy | 
|  | 1400 | * it */ | 
| Namhyung Kim | bb005a5 | 2011-05-24 17:11:27 -0700 | [diff] [blame] | 1401 | ret = -ENODEV; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1402 | if (!(capabilities & BDI_CAP_MAP_COPY)) | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1403 | goto error_just_free; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1404 |  | 
|  | 1405 | capabilities &= ~BDI_CAP_MAP_DIRECT; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1406 | } else { | 
|  | 1407 | vma->vm_start = region->vm_start = addr; | 
|  | 1408 | vma->vm_end = region->vm_end = addr + len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1409 | } | 
|  | 1410 | } | 
|  | 1411 | } | 
|  | 1412 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1413 | vma->vm_region = region; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1414 |  | 
| David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1415 | /* set up the mapping | 
|  | 1416 | * - the region is filled in if BDI_CAP_MAP_DIRECT is still set | 
|  | 1417 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1418 | if (file && vma->vm_flags & VM_SHARED) | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1419 | ret = do_mmap_shared_file(vma); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1420 | else | 
| David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1421 | ret = do_mmap_private(vma, region, len, capabilities); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1422 | if (ret < 0) | 
| David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1423 | goto error_just_free; | 
|  | 1424 | add_nommu_region(region); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1425 |  | 
| Jie Zhang | ea63763 | 2009-12-14 18:00:02 -0800 | [diff] [blame] | 1426 | /* clear anonymous mappings that don't ask for uninitialized data */ | 
|  | 1427 | if (!vma->vm_file && !(flags & MAP_UNINITIALIZED)) | 
|  | 1428 | memset((void *)region->vm_start, 0, | 
|  | 1429 | region->vm_end - region->vm_start); | 
|  | 1430 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1431 | /* okay... we have a mapping; now we have to register it */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1432 | result = vma->vm_start; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1433 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1434 | current->mm->total_vm += len >> PAGE_SHIFT; | 
|  | 1435 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1436 | share: | 
|  | 1437 | add_vma_to_mm(current->mm, vma); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1438 |  | 
| Mike Frysinger | cfe79c0 | 2010-01-06 17:23:23 +0000 | [diff] [blame] | 1439 | /* we flush the region from the icache only when the first executable | 
|  | 1440 | * mapping of it is made */ | 
|  | 1441 | if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { | 
|  | 1442 | flush_icache_range(region->vm_start, region->vm_end); | 
|  | 1443 | region->vm_icache_flushed = true; | 
|  | 1444 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1445 |  | 
| Mike Frysinger | cfe79c0 | 2010-01-06 17:23:23 +0000 | [diff] [blame] | 1446 | up_write(&nommu_region_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1447 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1448 | kleave(" = %lx", result); | 
|  | 1449 | return result; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1450 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1451 | error_just_free: | 
|  | 1452 | up_write(&nommu_region_sem); | 
|  | 1453 | error: | 
| David Howells | 89a8640 | 2009-10-30 13:13:26 +0000 | [diff] [blame] | 1454 | if (region->vm_file) | 
|  | 1455 | fput(region->vm_file); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1456 | kmem_cache_free(vm_region_jar, region); | 
| David Howells | 89a8640 | 2009-10-30 13:13:26 +0000 | [diff] [blame] | 1457 | if (vma->vm_file) | 
|  | 1458 | fput(vma->vm_file); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1459 | kmem_cache_free(vm_area_cachep, vma); | 
|  | 1460 | kleave(" = %d", ret); | 
|  | 1461 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1462 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1463 | sharing_violation: | 
|  | 1464 | up_write(&nommu_region_sem); | 
|  | 1465 | printk(KERN_WARNING "Attempt to share mismatched mappings\n"); | 
|  | 1466 | ret = -EINVAL; | 
|  | 1467 | goto error; | 
|  | 1468 |  | 
|  | 1469 | error_getting_vma: | 
|  | 1470 | kmem_cache_free(vm_region_jar, region); | 
|  | 1471 | printk(KERN_WARNING "Allocation of vma for %lu byte allocation" | 
|  | 1472 | " from process %d failed\n", | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1473 | len, current->pid); | 
| David Rientjes | 7bf02ea | 2011-05-24 17:11:16 -0700 | [diff] [blame] | 1474 | show_free_areas(0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1475 | return -ENOMEM; | 
|  | 1476 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1477 | error_getting_region: | 
|  | 1478 | printk(KERN_WARNING "Allocation of vm region for %lu byte allocation" | 
|  | 1479 | " from process %d failed\n", | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1480 | len, current->pid); | 
| David Rientjes | 7bf02ea | 2011-05-24 17:11:16 -0700 | [diff] [blame] | 1481 | show_free_areas(0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1482 | return -ENOMEM; | 
|  | 1483 | } | 
| Linus Torvalds | 6be5ceb | 2012-04-20 17:13:58 -0700 | [diff] [blame] | 1484 |  | 
| Hugh Dickins | 66f0dc4 | 2009-12-30 20:17:34 +0000 | [diff] [blame] | 1485 | SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, | 
|  | 1486 | unsigned long, prot, unsigned long, flags, | 
|  | 1487 | unsigned long, fd, unsigned long, pgoff) | 
|  | 1488 | { | 
|  | 1489 | struct file *file = NULL; | 
|  | 1490 | unsigned long retval = -EBADF; | 
|  | 1491 |  | 
| Al Viro | 120a795 | 2010-10-30 02:54:44 -0400 | [diff] [blame] | 1492 | audit_mmap_fd(fd, flags); | 
| Hugh Dickins | 66f0dc4 | 2009-12-30 20:17:34 +0000 | [diff] [blame] | 1493 | if (!(flags & MAP_ANONYMOUS)) { | 
|  | 1494 | file = fget(fd); | 
|  | 1495 | if (!file) | 
|  | 1496 | goto out; | 
|  | 1497 | } | 
|  | 1498 |  | 
|  | 1499 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | 
|  | 1500 |  | 
| Greg Ungerer | ad1ed29 | 2012-06-04 14:29:59 +1000 | [diff] [blame] | 1501 | retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); | 
| Hugh Dickins | 66f0dc4 | 2009-12-30 20:17:34 +0000 | [diff] [blame] | 1502 |  | 
|  | 1503 | if (file) | 
|  | 1504 | fput(file); | 
|  | 1505 | out: | 
|  | 1506 | return retval; | 
|  | 1507 | } | 
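|  |  |  | 
|  |  | /* Illustrative only: an anonymous request never reaches fget() above -- | 
|  |  |  * the fd argument is ignored when MAP_ANONYMOUS is set: */ | 
|  |  | #if 0 | 
|  |  | char *buf = mmap(NULL, 8192, PROT_READ | PROT_WRITE, | 
|  |  | 		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | 
|  |  | #endif | 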
|  | 1508 |  | 
| Christoph Hellwig | a467937 | 2010-03-10 15:21:15 -0800 | [diff] [blame] | 1509 | #ifdef __ARCH_WANT_SYS_OLD_MMAP | 
|  | 1510 | struct mmap_arg_struct { | 
|  | 1511 | unsigned long addr; | 
|  | 1512 | unsigned long len; | 
|  | 1513 | unsigned long prot; | 
|  | 1514 | unsigned long flags; | 
|  | 1515 | unsigned long fd; | 
|  | 1516 | unsigned long offset; | 
|  | 1517 | }; | 
|  | 1518 |  | 
|  | 1519 | SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) | 
|  | 1520 | { | 
|  | 1521 | struct mmap_arg_struct a; | 
|  | 1522 |  | 
|  | 1523 | if (copy_from_user(&a, arg, sizeof(a))) | 
|  | 1524 | return -EFAULT; | 
|  | 1525 | if (a.offset & ~PAGE_MASK) | 
|  | 1526 | return -EINVAL; | 
|  | 1527 |  | 
|  | 1528 | return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, | 
|  | 1529 | a.offset >> PAGE_SHIFT); | 
|  | 1530 | } | 
|  | 1531 | #endif /* __ARCH_WANT_SYS_OLD_MMAP */ | 
|  | 1532 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1533 | /* | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1534 | * split a vma into two pieces at address 'addr'; a new vma is allocated for | 
|  | 1535 | * either the first part or the tail. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1536 | */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1537 | int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, | 
|  | 1538 | unsigned long addr, int new_below) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1539 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1540 | struct vm_area_struct *new; | 
|  | 1541 | struct vm_region *region; | 
|  | 1542 | unsigned long npages; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1543 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1544 | kenter(""); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1545 |  | 
| David Howells | 779c102 | 2010-01-15 17:01:34 -0800 | [diff] [blame] | 1546 | /* we're only permitted to split anonymous regions (these should have | 
|  | 1547 | * only a single usage on the region) */ | 
|  | 1548 | if (vma->vm_file) | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1549 | return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1550 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1551 | if (mm->map_count >= sysctl_max_map_count) | 
|  | 1552 | return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1553 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1554 | region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL); | 
|  | 1555 | if (!region) | 
|  | 1556 | return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1557 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1558 | new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); | 
|  | 1559 | if (!new) { | 
|  | 1560 | kmem_cache_free(vm_region_jar, region); | 
|  | 1561 | return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1562 | } | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1563 |  | 
|  | 1564 | /* most fields are the same, copy all, and then fixup */ | 
|  | 1565 | *new = *vma; | 
|  | 1566 | *region = *vma->vm_region; | 
|  | 1567 | new->vm_region = region; | 
|  | 1568 |  | 
|  | 1569 | npages = (addr - vma->vm_start) >> PAGE_SHIFT; | 
|  | 1570 |  | 
|  | 1571 | if (new_below) { | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1572 | region->vm_top = region->vm_end = new->vm_end = addr; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1573 | } else { | 
|  | 1574 | region->vm_start = new->vm_start = addr; | 
|  | 1575 | region->vm_pgoff = new->vm_pgoff += npages; | 
|  | 1576 | } | 
|  | 1577 |  | 
|  | 1578 | if (new->vm_ops && new->vm_ops->open) | 
|  | 1579 | new->vm_ops->open(new); | 
|  | 1580 |  | 
|  | 1581 | delete_vma_from_mm(vma); | 
|  | 1582 | down_write(&nommu_region_sem); | 
|  | 1583 | delete_nommu_region(vma->vm_region); | 
|  | 1584 | if (new_below) { | 
|  | 1585 | vma->vm_region->vm_start = vma->vm_start = addr; | 
|  | 1586 | vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; | 
|  | 1587 | } else { | 
|  | 1588 | vma->vm_region->vm_end = vma->vm_end = addr; | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1589 | vma->vm_region->vm_top = addr; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1590 | } | 
|  | 1591 | add_nommu_region(vma->vm_region); | 
|  | 1592 | add_nommu_region(new->vm_region); | 
|  | 1593 | up_write(&nommu_region_sem); | 
|  | 1594 | add_vma_to_mm(mm, vma); | 
|  | 1595 | add_vma_to_mm(mm, new); | 
|  | 1596 | return 0; | 
|  | 1597 | } | 
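|  |  |  | 
|  |  | /* worked example (illustrative addresses): splitting vma [0x100000, | 
|  |  |  * 0x104000) with vm_pgoff 0 at addr 0x102000 and new_below = 0 gives | 
|  |  |  * npages = 2; "new" becomes [0x102000, 0x104000) with vm_pgoff 2 while | 
|  |  |  * "vma" shrinks to [0x100000, 0x102000) with vm_top = 0x102000 */ | 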
|  | 1598 |  | 
|  | 1599 | /* | 
|  | 1600 | * shrink a VMA by removing the specified chunk from either the beginning or | 
|  | 1601 | * the end | 
|  | 1602 | */ | 
|  | 1603 | static int shrink_vma(struct mm_struct *mm, | 
|  | 1604 | struct vm_area_struct *vma, | 
|  | 1605 | unsigned long from, unsigned long to) | 
|  | 1606 | { | 
|  | 1607 | struct vm_region *region; | 
|  | 1608 |  | 
|  | 1609 | kenter(""); | 
|  | 1610 |  | 
|  | 1611 | /* adjust the VMA's pointers, which may reposition it in the MM's tree | 
|  | 1612 | * and list */ | 
|  | 1613 | delete_vma_from_mm(vma); | 
|  | 1614 | if (from > vma->vm_start) | 
|  | 1615 | vma->vm_end = from; | 
|  | 1616 | else | 
|  | 1617 | vma->vm_start = to; | 
|  | 1618 | add_vma_to_mm(mm, vma); | 
|  | 1619 |  | 
|  | 1620 | /* cut the backing region down to size */ | 
|  | 1621 | region = vma->vm_region; | 
| David Howells | 1e2ae59 | 2010-01-15 17:01:33 -0800 | [diff] [blame] | 1622 | BUG_ON(region->vm_usage != 1); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1623 |  | 
|  | 1624 | down_write(&nommu_region_sem); | 
|  | 1625 | delete_nommu_region(region); | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1626 | if (from > region->vm_start) { | 
|  | 1627 | to = region->vm_top; | 
|  | 1628 | region->vm_top = region->vm_end = from; | 
|  | 1629 | } else { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1630 | region->vm_start = to; | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1631 | } | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1632 | add_nommu_region(region); | 
|  | 1633 | up_write(&nommu_region_sem); | 
|  | 1634 |  | 
|  | 1635 | free_page_series(from, to); | 
|  | 1636 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1637 | } | 
|  | 1638 |  | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1639 | /* | 
|  | 1640 | * release a mapping | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1641 | * - under NOMMU conditions the chunk to be unmapped must be backed by a single | 
|  | 1642 | *   VMA, though it need not cover the whole VMA | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1643 | */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1644 | int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1645 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1646 | struct vm_area_struct *vma; | 
| Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1647 | unsigned long end; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1648 | int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1649 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1650 | kenter(",%lx,%zx", start, len); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1651 |  | 
| Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1652 | len = PAGE_ALIGN(len); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1653 | if (len == 0) | 
|  | 1654 | return -EINVAL; | 
|  | 1655 |  | 
| Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1656 | end = start + len; | 
|  | 1657 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1658 | /* find the first potentially overlapping VMA */ | 
|  | 1659 | vma = find_vma(mm, start); | 
|  | 1660 | if (!vma) { | 
| David Howells | 33e5d769 | 2009-04-02 16:56:32 -0700 | [diff] [blame] | 1661 | static int limit; | 
|  | 1662 | if (limit < 5) { | 
|  | 1663 | printk(KERN_WARNING | 
|  | 1664 | "munmap of memory not mmapped by process %d" | 
|  | 1665 | " (%s): 0x%lx-0x%lx\n", | 
|  | 1666 | current->pid, current->comm, | 
|  | 1667 | start, start + len - 1); | 
|  | 1668 | limit++; | 
|  | 1669 | } | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1670 | return -EINVAL; | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1671 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1672 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1673 | /* we're allowed to split an anonymous VMA but not a file-backed one */ | 
|  | 1674 | if (vma->vm_file) { | 
|  | 1675 | do { | 
|  | 1676 | if (start > vma->vm_start) { | 
|  | 1677 | kleave(" = -EINVAL [miss]"); | 
|  | 1678 | return -EINVAL; | 
|  | 1679 | } | 
|  | 1680 | if (end == vma->vm_end) | 
|  | 1681 | goto erase_whole_vma; | 
| Namhyung Kim | d75a310 | 2011-05-24 17:11:25 -0700 | [diff] [blame] | 1682 | vma = vma->vm_next; | 
|  | 1683 | } while (vma); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1684 | kleave(" = -EINVAL [split file]"); | 
|  | 1685 | return -EINVAL; | 
|  | 1686 | } else { | 
|  | 1687 | /* the chunk must be a subset of the VMA found */ | 
|  | 1688 | if (start == vma->vm_start && end == vma->vm_end) | 
|  | 1689 | goto erase_whole_vma; | 
|  | 1690 | if (start < vma->vm_start || end > vma->vm_end) { | 
|  | 1691 | kleave(" = -EINVAL [superset]"); | 
|  | 1692 | return -EINVAL; | 
|  | 1693 | } | 
|  | 1694 | if (start & ~PAGE_MASK) { | 
|  | 1695 | kleave(" = -EINVAL [unaligned start]"); | 
|  | 1696 | return -EINVAL; | 
|  | 1697 | } | 
|  | 1698 | if (end != vma->vm_end && end & ~PAGE_MASK) { | 
|  | 1699 | kleave(" = -EINVAL [unaligned split]"); | 
|  | 1700 | return -EINVAL; | 
|  | 1701 | } | 
|  | 1702 | if (start != vma->vm_start && end != vma->vm_end) { | 
|  | 1703 | ret = split_vma(mm, vma, start, 1); | 
|  | 1704 | if (ret < 0) { | 
|  | 1705 | kleave(" = %d [split]", ret); | 
|  | 1706 | return ret; | 
|  | 1707 | } | 
|  | 1708 | } | 
|  | 1709 | return shrink_vma(mm, vma, start, end); | 
|  | 1710 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1711 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1712 | erase_whole_vma: | 
|  | 1713 | delete_vma_from_mm(vma); | 
|  | 1714 | delete_vma(mm, vma); | 
|  | 1715 | kleave(" = 0"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1716 | return 0; | 
|  | 1717 | } | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 1718 | EXPORT_SYMBOL(do_munmap); | 
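|  |  |  | 
|  |  | /* Illustrative userspace consequences of the rules above (nommu target | 
|  |  |  * assumed; errno values as the checks suggest): */ | 
|  |  | #if 0 | 
|  |  | char *p = mmap(NULL, 4 * 4096, PROT_READ | PROT_WRITE, | 
|  |  | 	       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | 
|  |  |  | 
|  |  | munmap(p + 3 * 4096, 4096);	/* ok: anonymous VMAs may be shrunk or split */ | 
|  |  | munmap(p, 3 * 4096);	/* ok: exactly the remaining VMA */ | 
|  |  |  | 
|  |  | /* a file-backed VMA, by contrast, may only be unmapped whole; any | 
|  |  |  * partial range fails with -EINVAL */ | 
|  |  | #endif | 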
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1719 |  | 
| Al Viro | bfce281 | 2012-04-20 21:57:04 -0400 | [diff] [blame] | 1720 | int vm_munmap(unsigned long addr, size_t len) | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1721 | { | 
| Al Viro | bfce281 | 2012-04-20 21:57:04 -0400 | [diff] [blame] | 1722 | struct mm_struct *mm = current->mm; | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1723 | int ret; | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1724 |  | 
|  | 1725 | down_write(&mm->mmap_sem); | 
|  | 1726 | ret = do_munmap(mm, addr, len); | 
|  | 1727 | up_write(&mm->mmap_sem); | 
|  | 1728 | return ret; | 
|  | 1729 | } | 
| Linus Torvalds | a46ef99 | 2012-04-20 16:20:01 -0700 | [diff] [blame] | 1730 | EXPORT_SYMBOL(vm_munmap); | 
|  | 1731 |  | 
|  | 1732 | SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) | 
|  | 1733 | { | 
| Al Viro | bfce281 | 2012-04-20 21:57:04 -0400 | [diff] [blame] | 1734 | return vm_munmap(addr, len); | 
| Linus Torvalds | a46ef99 | 2012-04-20 16:20:01 -0700 | [diff] [blame] | 1735 | } | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1736 |  | 
|  | 1737 | /* | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1738 | * release all the mappings made in a process's VM space | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1739 | */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1740 | void exit_mmap(struct mm_struct *mm) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1741 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1742 | struct vm_area_struct *vma; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1743 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1744 | if (!mm) | 
|  | 1745 | return; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1746 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1747 | kenter(""); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1748 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1749 | mm->total_vm = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1750 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1751 | while ((vma = mm->mmap)) { | 
|  | 1752 | mm->mmap = vma->vm_next; | 
|  | 1753 | delete_vma_from_mm(vma); | 
|  | 1754 | delete_vma(mm, vma); | 
| Steven J. Magnani | 04c3496 | 2010-11-24 12:56:54 -0800 | [diff] [blame] | 1755 | cond_resched(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1756 | } | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1757 |  | 
|  | 1758 | kleave(""); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1759 | } | 
|  | 1760 |  | 
| Linus Torvalds | e4eb1ff | 2012-04-20 15:35:40 -0700 | [diff] [blame] | 1761 | unsigned long vm_brk(unsigned long addr, unsigned long len) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1762 | { | 
|  | 1763 | return -ENOMEM; | 
|  | 1764 | } | 
|  | 1765 |  | 
|  | 1766 | /* | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1767 | * expand (or shrink) an existing mapping, potentially moving it at the same | 
|  | 1768 | * time (controlled by the MREMAP_MAYMOVE flag and available VM space) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1769 | * | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1770 | * under NOMMU conditions, we only permit changing a mapping's size, and only | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1771 | * as long as it stays within the region allocated by do_mmap_private() and the | 
|  | 1772 | * block is not shareable | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1773 | * | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1774 | * MREMAP_FIXED is not supported under NOMMU conditions | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1775 | */ | 
| Al Viro | 4b377ba | 2013-03-04 10:47:59 -0500 | [diff] [blame] | 1776 | static unsigned long do_mremap(unsigned long addr, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1777 | unsigned long old_len, unsigned long new_len, | 
|  | 1778 | unsigned long flags, unsigned long new_addr) | 
|  | 1779 | { | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1780 | struct vm_area_struct *vma; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1781 |  | 
|  | 1782 | /* insanity checks first */ | 
| Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1783 | old_len = PAGE_ALIGN(old_len); | 
|  | 1784 | new_len = PAGE_ALIGN(new_len); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1785 | if (old_len == 0 || new_len == 0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1786 | return (unsigned long) -EINVAL; | 
|  | 1787 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1788 | if (addr & ~PAGE_MASK) | 
|  | 1789 | return -EINVAL; | 
|  | 1790 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1791 | if (flags & MREMAP_FIXED && new_addr != addr) | 
|  | 1792 | return (unsigned long) -EINVAL; | 
|  | 1793 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1794 | vma = find_vma_exact(current->mm, addr, old_len); | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1795 | if (!vma) | 
|  | 1796 | return (unsigned long) -EINVAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1797 |  | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1798 | if (vma->vm_end != vma->vm_start + old_len) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1799 | return (unsigned long) -EFAULT; | 
|  | 1800 |  | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1801 | if (vma->vm_flags & VM_MAYSHARE) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1802 | return (unsigned long) -EPERM; | 
|  | 1803 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1804 | if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1805 | return (unsigned long) -ENOMEM; | 
|  | 1806 |  | 
|  | 1807 | /* all checks complete - do it */ | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1808 | vma->vm_end = vma->vm_start + new_len; | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1809 | return vma->vm_start; | 
|  | 1810 | } | 
|  | 1811 |  | 
| Heiko Carstens | 6a6160a | 2009-01-14 14:14:15 +0100 | [diff] [blame] | 1812 | SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, | 
|  | 1813 | unsigned long, new_len, unsigned long, flags, | 
|  | 1814 | unsigned long, new_addr) | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1815 | { | 
|  | 1816 | unsigned long ret; | 
|  | 1817 |  | 
|  | 1818 | down_write(&current->mm->mmap_sem); | 
|  | 1819 | ret = do_mremap(addr, old_len, new_len, flags, new_addr); | 
|  | 1820 | up_write(&current->mm->mmap_sem); | 
|  | 1821 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1822 | } | 
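|  |  |  | 
|  |  | /* Illustrative only: resizing in place is all that nommu mremap() | 
|  |  |  * permits, and growth is bounded by the region backing the mapping: */ | 
|  |  | #if 0 | 
|  |  | char *p = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE, | 
|  |  | 	       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | 
|  |  |  | 
|  |  | p = mremap(p, 3 * 4096, 2 * 4096, 0);	/* shrink in place: ok */ | 
|  |  | p = mremap(p, 2 * 4096, 16 * 4096, 0);	/* MAP_FAILED, errno == ENOMEM */ | 
|  |  | #endif | 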
|  | 1823 |  | 
| Michel Lespinasse | 240aade | 2013-02-22 16:35:56 -0800 | [diff] [blame] | 1824 | struct page *follow_page_mask(struct vm_area_struct *vma, | 
|  | 1825 | unsigned long address, unsigned int flags, | 
|  | 1826 | unsigned int *page_mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1827 | { | 
| Michel Lespinasse | 240aade | 2013-02-22 16:35:56 -0800 | [diff] [blame] | 1828 | *page_mask = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1829 | return NULL; | 
|  | 1830 | } | 
|  | 1831 |  | 
| Bob Liu | 8f3b132 | 2011-07-08 15:39:46 -0700 | [diff] [blame] | 1832 | int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, | 
|  | 1833 | unsigned long pfn, unsigned long size, pgprot_t prot) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1834 | { | 
| Bob Liu | 8f3b132 | 2011-07-08 15:39:46 -0700 | [diff] [blame] | 1835 | if (addr != (pfn << PAGE_SHIFT)) | 
|  | 1836 | return -EINVAL; | 
|  | 1837 |  | 
| Konstantin Khlebnikov | 314e51b | 2012-10-08 16:29:02 -0700 | [diff] [blame] | 1838 | vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; | 
| Greg Ungerer | 66aa2b4 | 2005-09-12 11:18:10 +1000 | [diff] [blame] | 1839 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1840 | } | 
| Luke Yang | 22c4af4 | 2006-07-14 00:24:09 -0700 | [diff] [blame] | 1841 | EXPORT_SYMBOL(remap_pfn_range); | 
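|  |  |  | 
|  |  | /* Because nommu user addresses are physical addresses, only an identity | 
|  |  |  * remap can succeed here.  A hypothetical framebuffer driver sketch: */ | 
|  |  | #if 0 | 
|  |  | if (remap_pfn_range(vma, vma->vm_start, fb_paddr >> PAGE_SHIFT, | 
|  |  | 		    vma->vm_end - vma->vm_start, vma->vm_page_prot)) | 
|  |  | 	return -EAGAIN;	/* succeeds only if vm_start == fb_paddr */ | 
|  |  | #endif | 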
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1842 |  | 
| Linus Torvalds | 3c0b9de | 2013-04-27 13:25:38 -0700 | [diff] [blame] | 1843 | int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) | 
|  | 1844 | { | 
|  | 1845 | unsigned long pfn = start >> PAGE_SHIFT; | 
|  | 1846 | unsigned long vm_len = vma->vm_end - vma->vm_start; | 
|  | 1847 |  | 
|  | 1848 | pfn += vma->vm_pgoff; | 
|  | 1849 | return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); | 
|  | 1850 | } | 
|  | 1851 | EXPORT_SYMBOL(vm_iomap_memory); | 
|  | 1852 |  | 
| Paul Mundt | f905bc4 | 2008-02-04 22:29:59 -0800 | [diff] [blame] | 1853 | int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, | 
|  | 1854 | unsigned long pgoff) | 
|  | 1855 | { | 
|  | 1856 | unsigned int size = vma->vm_end - vma->vm_start; | 
|  | 1857 |  | 
|  | 1858 | if (!(vma->vm_flags & VM_USERMAP)) | 
|  | 1859 | return -EINVAL; | 
|  | 1860 |  | 
|  | 1861 | vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); | 
|  | 1862 | vma->vm_end = vma->vm_start + size; | 
|  | 1863 |  | 
|  | 1864 | return 0; | 
|  | 1865 | } | 
|  | 1866 | EXPORT_SYMBOL(remap_vmalloc_range); | 
|  | 1867 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1868 | unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr, | 
|  | 1869 | unsigned long len, unsigned long pgoff, unsigned long flags) | 
|  | 1870 | { | 
|  | 1871 | return -ENOMEM; | 
|  | 1872 | } | 
|  | 1873 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1874 | void unmap_mapping_range(struct address_space *mapping, | 
|  | 1875 | loff_t const holebegin, loff_t const holelen, | 
|  | 1876 | int even_cows) | 
|  | 1877 | { | 
|  | 1878 | } | 
| Luke Yang | 22c4af4 | 2006-07-14 00:24:09 -0700 | [diff] [blame] | 1879 | EXPORT_SYMBOL(unmap_mapping_range); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1880 |  | 
|  | 1881 | /* | 
|  | 1882 | * Check that a process has enough memory to allocate a new virtual | 
|  | 1883 | * mapping. 0 means there is enough memory for the allocation to | 
|  | 1884 | * succeed and -ENOMEM implies there is not. | 
|  | 1885 | * | 
|  | 1886 | * We currently support three overcommit policies, which are set via the | 
|  | 1887 | * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting | 
|  | 1888 | * | 
|  | 1889 | * Strict overcommit modes added 2002 Feb 26 by Alan Cox. | 
|  | 1890 | * Additional code 2002 Jul 20 by Robert Love. | 
|  | 1891 | * | 
|  | 1892 | * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise. | 
|  | 1893 | * | 
|  | 1894 | * Note this is a helper function intended to be used by LSMs which | 
|  | 1895 | * wish to use this logic. | 
|  | 1896 | */ | 
| Alan Cox | 34b4e4a | 2007-08-22 14:01:28 -0700 | [diff] [blame] | 1897 | int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1898 | { | 
| Andrew Shewmaker | c9b1d09 | 2013-04-29 15:08:10 -0700 | [diff] [blame] | 1899 | unsigned long free, allowed, reserve; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1900 |  | 
|  | 1901 | vm_acct_memory(pages); | 
|  | 1902 |  | 
|  | 1903 | /* | 
|  | 1904 | * Sometimes we want to use more memory than we have | 
|  | 1905 | */ | 
|  | 1906 | if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS) | 
|  | 1907 | return 0; | 
|  | 1908 |  | 
|  | 1909 | if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { | 
| Dmitry Fink | c15bef3 | 2011-07-25 17:12:19 -0700 | [diff] [blame] | 1910 | free = global_page_state(NR_FREE_PAGES); | 
|  | 1911 | free += global_page_state(NR_FILE_PAGES); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1912 |  | 
| Dmitry Fink | c15bef3 | 2011-07-25 17:12:19 -0700 | [diff] [blame] | 1913 | /* | 
|  | 1914 | * shmem pages shouldn't be counted as free in this | 
|  | 1915 | * case: they can't be purged, only swapped out, and | 
|  | 1916 | * that won't affect the overall amount of available | 
|  | 1917 | * memory in the system. | 
|  | 1918 | */ | 
|  | 1919 | free -= global_page_state(NR_SHMEM); | 
|  | 1920 |  | 
| Shaohua Li | ec8acf2 | 2013-02-22 16:34:38 -0800 | [diff] [blame] | 1921 | free += get_nr_swap_pages(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1922 |  | 
|  | 1923 | /* | 
|  | 1924 | * Any slabs which are created with the | 
|  | 1925 | * SLAB_RECLAIM_ACCOUNT flag claim to have contents | 
|  | 1926 | * which are reclaimable, under pressure.  The dentry | 
|  | 1927 | * cache and most inode caches should fall into this | 
|  | 1928 | */ | 
| Christoph Lameter | 972d1a7 | 2006-09-25 23:31:51 -0700 | [diff] [blame] | 1929 | free += global_page_state(NR_SLAB_RECLAIMABLE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1930 |  | 
|  | 1931 | /* | 
| Dmitry Fink | c15bef3 | 2011-07-25 17:12:19 -0700 | [diff] [blame] | 1932 | * Leave the reserved pages alone; they are not available for anonymous mappings. | 
|  | 1933 | */ | 
|  | 1934 | if (free <= totalreserve_pages) | 
|  | 1935 | goto error; | 
|  | 1936 | else | 
|  | 1937 | free -= totalreserve_pages; | 
|  | 1938 |  | 
|  | 1939 | /* | 
| Andrew Shewmaker | 4eeab4f | 2013-04-29 15:08:11 -0700 | [diff] [blame] | 1940 | * Reserve some for root | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1941 | */ | 
|  | 1942 | if (!cap_sys_admin) | 
| Andrew Shewmaker | 4eeab4f | 2013-04-29 15:08:11 -0700 | [diff] [blame] | 1943 | free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1944 |  | 
|  | 1945 | if (free > pages) | 
|  | 1946 | return 0; | 
|  | 1947 |  | 
| Hideo AOKI | d5ddc79 | 2006-04-10 22:53:01 -0700 | [diff] [blame] | 1948 | goto error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1949 | } | 
|  | 1950 |  | 
|  | 1951 | allowed = totalram_pages * sysctl_overcommit_ratio / 100; | 
|  | 1952 | /* | 
| Andrew Shewmaker | 4eeab4f | 2013-04-29 15:08:11 -0700 | [diff] [blame] | 1953 | * Reserve some for root | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1954 | */ | 
|  | 1955 | if (!cap_sys_admin) | 
| Andrew Shewmaker | 4eeab4f | 2013-04-29 15:08:11 -0700 | [diff] [blame] | 1956 | allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1957 | allowed += total_swap_pages; | 
|  | 1958 |  | 
| Andrew Shewmaker | c9b1d09 | 2013-04-29 15:08:10 -0700 | [diff] [blame] | 1959 | /* | 
|  | 1960 | * Don't let a single process grow so big a user can't recover | 
|  | 1961 | */ | 
|  | 1962 | if (mm) { | 
|  | 1963 | reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); | 
|  | 1964 | allowed -= min(mm->total_vm / 32, reserve); | 
|  | 1965 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1966 |  | 
| KOSAKI Motohiro | 00a62ce | 2009-04-30 15:08:51 -0700 | [diff] [blame] | 1967 | if (percpu_counter_read_positive(&vm_committed_as) < allowed) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1968 | return 0; | 
| KOSAKI Motohiro | 00a62ce | 2009-04-30 15:08:51 -0700 | [diff] [blame] | 1969 |  | 
| Hideo AOKI | d5ddc79 | 2006-04-10 22:53:01 -0700 | [diff] [blame] | 1970 | error: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1971 | vm_unacct_memory(pages); | 
|  | 1972 |  | 
|  | 1973 | return -ENOMEM; | 
|  | 1974 | } | 
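/*
 * Illustrative notes (not part of nommu.c):
 *
 * The "kbytes >> (PAGE_SHIFT - 10)" conversions above turn KiB into
 * pages: with 4KiB pages (PAGE_SHIFT == 12) the default 8192 KiB admin
 * reserve becomes 8192 >> 2 == 2048 pages, i.e. 8MB.
 *
 * An LSM is expected to reach this helper through its vm_enough_memory
 * hook; the sketch below is modelled on cap_vm_enough_memory() in
 * security/commoncap.c of this era (the cap_capable() signature is
 * assumed from that tree):
 */
static int example_vm_enough_memory(struct mm_struct *mm, long pages)
{
	int cap_sys_admin = 0;

	/* decide the cap_sys_admin argument before delegating */
	if (cap_capable(current_cred(), &init_user_ns, CAP_SYS_ADMIN,
			SECURITY_CAP_NOAUDIT) == 0)
		cap_sys_admin = 1;
	return __vm_enough_memory(mm, pages, cap_sys_admin);
}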
|  | 1975 |  | 
| Stephen Wilson | cae5d39 | 2011-03-13 15:49:17 -0400 | [diff] [blame] | 1976 | int in_gate_area_no_mm(unsigned long addr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1977 | { | 
|  | 1978 | return 0; | 
|  | 1979 | } | 
| David Howells | b0e1519 | 2006-01-06 00:11:42 -0800 | [diff] [blame] | 1980 |  | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1981 | int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 
| David Howells | b0e1519 | 2006-01-06 00:11:42 -0800 | [diff] [blame] | 1982 | { | 
|  | 1983 | BUG(); | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1984 | return 0; | 
| David Howells | b0e1519 | 2006-01-06 00:11:42 -0800 | [diff] [blame] | 1985 | } | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 1986 | EXPORT_SYMBOL(filemap_fault); | 
| David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1987 |  | 
| Konstantin Khlebnikov | 0b173bc | 2012-10-08 16:28:46 -0700 | [diff] [blame] | 1988 | int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr, | 
|  | 1989 | unsigned long size, pgoff_t pgoff) | 
|  | 1990 | { | 
|  | 1991 | BUG(); | 
|  | 1992 | return 0; | 
|  | 1993 | } | 
|  | 1994 | EXPORT_SYMBOL(generic_file_remap_pages); | 
|  | 1995 |  | 
| Mike Frysinger | f55f199 | 2011-03-29 14:05:12 +0100 | [diff] [blame] | 1996 | static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, | 
|  | 1997 | unsigned long addr, void *buf, int len, int write) | 
| David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1998 | { | 
| David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1999 | struct vm_area_struct *vma; | 
| David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 2000 |  | 
|  | 2001 | down_read(&mm->mmap_sem); | 
|  | 2002 |  | 
|  | 2003 | /* the access must start within one of the target process's mappings */ | 
| David Howells | 0159b14 | 2006-09-27 01:50:16 -0700 | [diff] [blame] | 2004 | vma = find_vma(mm, addr); | 
|  | 2005 | if (vma) { | 
| David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 2006 | /* don't overrun this mapping */ | 
|  | 2007 | if (addr + len >= vma->vm_end) | 
|  | 2008 | len = vma->vm_end - addr; | 
|  | 2009 |  | 
|  | 2010 | /* only read or write mappings where it is permitted */ | 
| David Howells | d00c7b9 | 2006-09-27 01:50:19 -0700 | [diff] [blame] | 2011 | if (write && vma->vm_flags & VM_MAYWRITE) | 
| Jie Zhang | 7959722 | 2010-01-06 17:23:28 +0000 | [diff] [blame] | 2012 | copy_to_user_page(vma, NULL, addr, | 
|  | 2013 | (void *) addr, buf, len); | 
| David Howells | d00c7b9 | 2006-09-27 01:50:19 -0700 | [diff] [blame] | 2014 | else if (!write && vma->vm_flags & VM_MAYREAD) | 
| Jie Zhang | 7959722 | 2010-01-06 17:23:28 +0000 | [diff] [blame] | 2015 | copy_from_user_page(vma, NULL, addr, | 
|  | 2016 | buf, (void *) addr, len); | 
| David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 2017 | else | 
|  | 2018 | len = 0; | 
|  | 2019 | } else { | 
|  | 2020 | len = 0; | 
|  | 2021 | } | 
|  | 2022 |  | 
|  | 2023 | up_read(&mm->mmap_sem); | 
| Mike Frysinger | f55f199 | 2011-03-29 14:05:12 +0100 | [diff] [blame] | 2024 |  | 
|  | 2025 | return len; | 
|  | 2026 | } | 
|  | 2027 |  | 
|  | 2028 | /** | 
|  | 2029 | * access_remote_vm - access another process' address space | 
|  | 2030 | * @mm:		the mm_struct of the target address space | 
|  | 2031 | * @addr:	start address to access | 
|  | 2032 | * @buf:	source or destination buffer | 
|  | 2033 | * @len:	number of bytes to transfer | 
|  | 2034 | * @write:	whether the access is a write | 
|  | 2035 | * | 
|  | 2036 | * The caller must hold a reference on @mm. | 
|  | 2037 | */ | 
|  | 2038 | int access_remote_vm(struct mm_struct *mm, unsigned long addr, | 
|  | 2039 | void *buf, int len, int write) | 
|  | 2040 | { | 
|  | 2041 | return __access_remote_vm(NULL, mm, addr, buf, len, write); | 
|  | 2042 | } | 
|  | 2043 |  | 
|  | 2044 | /* | 
|  | 2045 | * Access another process' address space. | 
|  | 2046 | * - source/target buffer must be kernel space | 
|  | 2047 | */ | 
|  | 2048 | int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) | 
|  | 2049 | { | 
|  | 2050 | struct mm_struct *mm; | 
|  | 2051 |  | 
|  | 2052 | if (addr + len < addr) | 
|  | 2053 | return 0; | 
|  | 2054 |  | 
|  | 2055 | mm = get_task_mm(tsk); | 
|  | 2056 | if (!mm) | 
|  | 2057 | return 0; | 
|  | 2058 |  | 
|  | 2059 | len = __access_remote_vm(tsk, mm, addr, buf, len, write); | 
|  | 2060 |  | 
| David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 2061 | mmput(mm); | 
|  | 2062 | return len; | 
|  | 2063 | } | 
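/*
 * Illustrative sketch (not part of nommu.c): a ptrace-style peek built
 * on access_process_vm().  Note the return convention above: the number
 * of bytes actually copied, zero on failure, never a negative errno.
 */
static int peek_word(struct task_struct *child, unsigned long addr,
		     unsigned long *word)
{
	int copied;

	/* write == 0, so this reads from the child's address space */
	copied = access_process_vm(child, addr, word, sizeof(*word), 0);
	return copied == sizeof(*word) ? 0 : -EIO;
}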
| David Howells | 7e66087 | 2010-01-15 17:01:39 -0800 | [diff] [blame] | 2064 |  | 
|  | 2065 | /** | 
|  | 2066 | * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode | 
|  | 2067 | * @inode: The inode to check | 
|  | 2068 | * @size: The current filesize of the inode | 
|  | 2069 | * @newsize: The proposed filesize of the inode | 
|  | 2070 | * | 
|  | 2071 | * Check the shared mappings on an inode on behalf of a shrinking truncate to | 
|  | 2072 | * make sure that any outstanding VMAs aren't broken and then shrink the | 
|  | 2073 | * vm_regions that extend beyond that so that do_mmap_pgoff() doesn't | 
|  | 2074 | * automatically grant mappings that are too large. | 
|  | 2075 | */ | 
|  | 2076 | int nommu_shrink_inode_mappings(struct inode *inode, size_t size, | 
|  | 2077 | size_t newsize) | 
|  | 2078 | { | 
|  | 2079 | struct vm_area_struct *vma; | 
| David Howells | 7e66087 | 2010-01-15 17:01:39 -0800 | [diff] [blame] | 2080 | struct vm_region *region; | 
|  | 2081 | pgoff_t low, high; | 
|  | 2082 | size_t r_size, r_top; | 
|  | 2083 |  | 
|  | 2084 | low = newsize >> PAGE_SHIFT; | 
|  | 2085 | high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | 
|  | 2086 |  | 
|  | 2087 | down_write(&nommu_region_sem); | 
| David Howells | 918e556 | 2012-02-23 13:50:35 +0000 | [diff] [blame] | 2088 | mutex_lock(&inode->i_mapping->i_mmap_mutex); | 
| David Howells | 7e66087 | 2010-01-15 17:01:39 -0800 | [diff] [blame] | 2089 |  | 
|  | 2090 | /* search for VMAs that fall within the dead zone */ | 
| Michel Lespinasse | 6b2dbba | 2012-10-08 16:31:25 -0700 | [diff] [blame] | 2091 | vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { | 
| David Howells | 7e66087 | 2010-01-15 17:01:39 -0800 | [diff] [blame] | 2092 | /* found one - only interested if it's shared out of the page | 
|  | 2093 | * cache */ | 
|  | 2094 | if (vma->vm_flags & VM_SHARED) { | 
| David Howells | 918e556 | 2012-02-23 13:50:35 +0000 | [diff] [blame] | 2095 | mutex_unlock(&inode->i_mapping->i_mmap_mutex); | 
| David Howells | 7e66087 | 2010-01-15 17:01:39 -0800 | [diff] [blame] | 2096 | up_write(&nommu_region_sem); | 
|  | 2097 | return -ETXTBSY; /* not quite true, but near enough */ | 
|  | 2098 | } | 
|  | 2099 | } | 
|  | 2100 |  | 
|  | 2101 | /* reduce any regions that overlap the dead zone - if they exist, | 
|  | 2102 | * they will be pointed to by VMAs that don't overlap the dead zone | 
|  | 2103 | * | 
|  | 2104 | * we don't check for any regions that start beyond the EOF as there | 
|  | 2105 | * shouldn't be any | 
|  | 2106 | */ | 
| Michel Lespinasse | 6b2dbba | 2012-10-08 16:31:25 -0700 | [diff] [blame] | 2107 | vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, | 
|  | 2108 | 0, ULONG_MAX) { | 
| David Howells | 7e66087 | 2010-01-15 17:01:39 -0800 | [diff] [blame] | 2109 | if (!(vma->vm_flags & VM_SHARED)) | 
|  | 2110 | continue; | 
|  | 2111 |  | 
|  | 2112 | region = vma->vm_region; | 
|  | 2113 | r_size = region->vm_top - region->vm_start; | 
|  | 2114 | r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size; | 
|  | 2115 |  | 
|  | 2116 | if (r_top > newsize) { | 
|  | 2117 | region->vm_top -= r_top - newsize; | 
|  | 2118 | if (region->vm_end > region->vm_top) | 
|  | 2119 | region->vm_end = region->vm_top; | 
|  | 2120 | } | 
|  | 2121 | } | 
|  | 2122 |  | 
| David Howells | 918e556 | 2012-02-23 13:50:35 +0000 | [diff] [blame] | 2123 | mutex_unlock(&inode->i_mapping->i_mmap_mutex); | 
| David Howells | 7e66087 | 2010-01-15 17:01:39 -0800 | [diff] [blame] | 2124 | up_write(&nommu_region_sem); | 
|  | 2125 | return 0; | 
|  | 2126 | } | 
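/*
 * Worked example of the shrink logic above (hypothetical numbers,
 * 4KiB pages): a shared region with vm_pgoff == 0 and 32KiB mapped has
 * r_top == 32768.  Truncating the file to newsize == 20480 makes
 * r_top > newsize, so vm_top is pulled back by 12KiB and vm_end is
 * clamped down to the new vm_top.
 */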
| Andrew Shewmaker | c9b1d09 | 2013-04-29 15:08:10 -0700 | [diff] [blame] | 2127 |  | 
|  | 2128 | /* | 
|  | 2129 | * Initialise sysctl_user_reserve_kbytes. | 
|  | 2130 | * | 
|  | 2131 | * This is intended to prevent a user from starting a single memory hogging | 
|  | 2132 | * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER | 
|  | 2133 | * mode. | 
|  | 2134 | * | 
|  | 2135 | * The default value is min(3% of free memory, 128MB) | 
|  | 2136 | * 128MB is enough to recover with sshd/login, bash, and top/kill. | 
|  | 2137 | */ | 
|  | 2138 | static int __meminit init_user_reserve(void) | 
|  | 2139 | { | 
|  | 2140 | unsigned long free_kbytes; | 
|  | 2141 |  | 
|  | 2142 | free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); | 
|  | 2143 |  | 
|  | 2144 | sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); | 
|  | 2145 | return 0; | 
|  | 2146 | } | 
|  | 2147 | module_init(init_user_reserve) | 
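/*
 * Worked example of the clamp above: 1UL << 17 KiB is 131072 KiB, i.e.
 * 128MB.  With 2GB free, free_kbytes / 32 is 64MB, so 64MB wins; only
 * once more than 4GB is free does the 128MB cap take effect.
 */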
| Andrew Shewmaker | 4eeab4f | 2013-04-29 15:08:11 -0700 | [diff] [blame] | 2148 |  | 
|  | 2149 | /* | 
|  | 2150 | * Initialise sysctl_admin_reserve_kbytes. | 
|  | 2151 | * | 
|  | 2152 | * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin | 
|  | 2153 | * to log in and kill a memory hogging process. | 
|  | 2154 | * | 
|  | 2155 | * Systems with more than 256MB will reserve 8MB, enough to recover | 
|  | 2156 | * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will | 
|  | 2157 | * only reserve 3% of free pages by default. | 
|  | 2158 | */ | 
|  | 2159 | static int __meminit init_admin_reserve(void) | 
|  | 2160 | { | 
|  | 2161 | unsigned long free_kbytes; | 
|  | 2162 |  | 
|  | 2163 | free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); | 
|  | 2164 |  | 
|  | 2165 | sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); | 
|  | 2166 | return 0; | 
|  | 2167 | } | 
|  | 2168 | module_init(init_admin_reserve) |
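/*
 * Worked example of the admin clamp above: 1UL << 13 KiB is 8192 KiB,
 * i.e. 8MB.  free_kbytes / 32 reaches 8MB at 256MB free, which is why
 * systems with more than 256MB reserve the full 8MB while smaller
 * systems reserve ~3% of free pages.
 */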