| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* | 
|  | 2 | *  linux/mm/nommu.c | 
|  | 3 | * | 
|  | 4 | *  Replacement code for mm functions to support CPU's that don't | 
|  | 5 | *  have any form of memory management unit (thus no virtual memory). | 
|  | 6 | * | 
|  | 7 | *  See Documentation/nommu-mmap.txt | 
|  | 8 | * | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 9 | *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10 | *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com> | 
|  | 11 | *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> | 
|  | 12 | *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com> | 
| Paul Mundt | eb6434d | 2009-01-21 17:45:47 +0900 | [diff] [blame] | 13 | *  Copyright (c) 2007-2009 Paul Mundt <lethal@linux-sh.org> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14 | */ | 
|  | 15 |  | 
| David Howells | f2b8544 | 2007-10-29 13:15:39 +0000 | [diff] [blame] | 16 | #include <linux/module.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 17 | #include <linux/mm.h> | 
|  | 18 | #include <linux/mman.h> | 
|  | 19 | #include <linux/swap.h> | 
|  | 20 | #include <linux/file.h> | 
|  | 21 | #include <linux/highmem.h> | 
|  | 22 | #include <linux/pagemap.h> | 
|  | 23 | #include <linux/slab.h> | 
|  | 24 | #include <linux/vmalloc.h> | 
| Roland McGrath | fa8e26c | 2008-07-25 19:45:50 -0700 | [diff] [blame] | 25 | #include <linux/tracehook.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 26 | #include <linux/blkdev.h> | 
|  | 27 | #include <linux/backing-dev.h> | 
|  | 28 | #include <linux/mount.h> | 
|  | 29 | #include <linux/personality.h> | 
|  | 30 | #include <linux/security.h> | 
|  | 31 | #include <linux/syscalls.h> | 
|  | 32 |  | 
|  | 33 | #include <asm/uaccess.h> | 
|  | 34 | #include <asm/tlb.h> | 
|  | 35 | #include <asm/tlbflush.h> | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 36 | #include "internal.h" | 
|  | 37 |  | 
|  | 38 | static inline __attribute__((format(printf, 1, 2))) | 
|  | 39 | void no_printk(const char *fmt, ...) | 
|  | 40 | { | 
|  | 41 | } | 
|  | 42 |  | 
|  | 43 | #if 0 | 
|  | 44 | #define kenter(FMT, ...) \ | 
|  | 45 | printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__) | 
|  | 46 | #define kleave(FMT, ...) \ | 
|  | 47 | printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__) | 
|  | 48 | #define kdebug(FMT, ...) \ | 
|  | 49 | printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__) | 
|  | 50 | #else | 
|  | 51 | #define kenter(FMT, ...) \ | 
|  | 52 | no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__) | 
|  | 53 | #define kleave(FMT, ...) \ | 
|  | 54 | no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__) | 
|  | 55 | #define kdebug(FMT, ...) \ | 
|  | 56 | no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__) | 
|  | 57 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 58 |  | 
| Nick Piggin | b291f00 | 2008-10-18 20:26:44 -0700 | [diff] [blame] | 59 | #include "internal.h" | 
|  | 60 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 61 | void *high_memory; | 
|  | 62 | struct page *mem_map; | 
|  | 63 | unsigned long max_mapnr; | 
|  | 64 | unsigned long num_physpages; | 
| Alan Cox | 80119ef | 2008-05-23 13:04:31 -0700 | [diff] [blame] | 65 | atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 66 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ | 
|  | 67 | int sysctl_overcommit_ratio = 50; /* default is 50% */ | 
|  | 68 | int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 69 | int sysctl_nr_trim_pages = 1; /* page trimming behaviour */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 70 | int heap_stack_gap = 0; | 
|  | 71 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 72 | atomic_t mmap_pages_allocated; | 
|  | 73 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 74 | EXPORT_SYMBOL(mem_map); | 
| Wu, Bryan | 6a04de6 | 2007-04-11 23:28:47 -0700 | [diff] [blame] | 75 | EXPORT_SYMBOL(num_physpages); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 76 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 77 | /* list of mapped, potentially shareable regions */ | 
|  | 78 | static struct kmem_cache *vm_region_jar; | 
|  | 79 | struct rb_root nommu_region_tree = RB_ROOT; | 
|  | 80 | DECLARE_RWSEM(nommu_region_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 81 |  | 
|  | 82 | struct vm_operations_struct generic_file_vm_ops = { | 
|  | 83 | }; | 
|  | 84 |  | 
|  | 85 | /* | 
|  | 86 | * Handle all mappings that got truncated by a "truncate()" | 
|  | 87 | * system call. | 
|  | 88 | * | 
|  | 89 | * NOTE! We have to be ready to update the memory sharing | 
|  | 90 | * between the file and the memory map for a potential last | 
|  | 91 | * incomplete page.  Ugly, but necessary. | 
|  | 92 | */ | 
|  | 93 | int vmtruncate(struct inode *inode, loff_t offset) | 
|  | 94 | { | 
|  | 95 | struct address_space *mapping = inode->i_mapping; | 
|  | 96 | unsigned long limit; | 
|  | 97 |  | 
|  | 98 | if (inode->i_size < offset) | 
|  | 99 | goto do_expand; | 
|  | 100 | i_size_write(inode, offset); | 
|  | 101 |  | 
|  | 102 | truncate_inode_pages(mapping, offset); | 
|  | 103 | goto out_truncate; | 
|  | 104 |  | 
|  | 105 | do_expand: | 
|  | 106 | limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; | 
|  | 107 | if (limit != RLIM_INFINITY && offset > limit) | 
|  | 108 | goto out_sig; | 
|  | 109 | if (offset > inode->i_sb->s_maxbytes) | 
|  | 110 | goto out; | 
|  | 111 | i_size_write(inode, offset); | 
|  | 112 |  | 
|  | 113 | out_truncate: | 
| Al Viro | acfa438 | 2008-12-04 10:06:33 -0500 | [diff] [blame] | 114 | if (inode->i_op->truncate) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 115 | inode->i_op->truncate(inode); | 
|  | 116 | return 0; | 
|  | 117 | out_sig: | 
|  | 118 | send_sig(SIGXFSZ, current, 0); | 
|  | 119 | out: | 
|  | 120 | return -EFBIG; | 
|  | 121 | } | 
|  | 122 |  | 
|  | 123 | EXPORT_SYMBOL(vmtruncate); | 
|  | 124 |  | 
|  | 125 | /* | 
|  | 126 | * Return the total memory allocated for this pointer, not | 
|  | 127 | * just what the caller asked for. | 
|  | 128 | * | 
|  | 129 | * Doesn't have to be accurate, i.e. may have races. | 
|  | 130 | */ | 
|  | 131 | unsigned int kobjsize(const void *objp) | 
|  | 132 | { | 
|  | 133 | struct page *page; | 
|  | 134 |  | 
| Michael Hennerich | 4016a13 | 2008-04-28 02:13:38 -0700 | [diff] [blame] | 135 | /* | 
|  | 136 | * If the object we have should not have ksize performed on it, | 
|  | 137 | * return size of 0 | 
|  | 138 | */ | 
| Paul Mundt | 5a1603b | 2008-06-12 16:29:55 +0900 | [diff] [blame] | 139 | if (!objp || !virt_addr_valid(objp)) | 
| Paul Mundt | 6cfd53f | 2008-06-05 22:46:08 -0700 | [diff] [blame] | 140 | return 0; | 
|  | 141 |  | 
|  | 142 | page = virt_to_head_page(objp); | 
| Paul Mundt | 6cfd53f | 2008-06-05 22:46:08 -0700 | [diff] [blame] | 143 |  | 
|  | 144 | /* | 
|  | 145 | * If the allocator sets PageSlab, we know the pointer came from | 
|  | 146 | * kmalloc(). | 
|  | 147 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 148 | if (PageSlab(page)) | 
|  | 149 | return ksize(objp); | 
|  | 150 |  | 
| Paul Mundt | 6cfd53f | 2008-06-05 22:46:08 -0700 | [diff] [blame] | 151 | /* | 
| Paul Mundt | ab2e83e | 2009-01-08 12:04:48 +0000 | [diff] [blame] | 152 | * If it's not a compound page, see if we have a matching VMA | 
|  | 153 | * region. This test is intentionally done in reverse order, | 
|  | 154 | * so if there's no VMA, we still fall through and hand back | 
|  | 155 | * PAGE_SIZE for 0-order pages. | 
|  | 156 | */ | 
|  | 157 | if (!PageCompound(page)) { | 
|  | 158 | struct vm_area_struct *vma; | 
|  | 159 |  | 
|  | 160 | vma = find_vma(current->mm, (unsigned long)objp); | 
|  | 161 | if (vma) | 
|  | 162 | return vma->vm_end - vma->vm_start; | 
|  | 163 | } | 
|  | 164 |  | 
|  | 165 | /* | 
| Paul Mundt | 6cfd53f | 2008-06-05 22:46:08 -0700 | [diff] [blame] | 166 | * The ksize() function is only guaranteed to work for pointers | 
| Paul Mundt | 5a1603b | 2008-06-12 16:29:55 +0900 | [diff] [blame] | 167 | * returned by kmalloc(). So handle arbitrary pointers here. | 
| Paul Mundt | 6cfd53f | 2008-06-05 22:46:08 -0700 | [diff] [blame] | 168 | */ | 
| Paul Mundt | 5a1603b | 2008-06-12 16:29:55 +0900 | [diff] [blame] | 169 | return PAGE_SIZE << compound_order(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 170 | } | 
|  | 171 |  | 
| Nick Piggin | b291f00 | 2008-10-18 20:26:44 -0700 | [diff] [blame] | 172 | int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | 
|  | 173 | unsigned long start, int len, int flags, | 
|  | 174 | struct page **pages, struct vm_area_struct **vmas) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 175 | { | 
| Sonic Zhang | 910e46d | 2006-09-27 01:50:17 -0700 | [diff] [blame] | 176 | struct vm_area_struct *vma; | 
| David Howells | 7b4d5b8 | 2006-09-27 01:50:18 -0700 | [diff] [blame] | 177 | unsigned long vm_flags; | 
|  | 178 | int i; | 
| Nick Piggin | b291f00 | 2008-10-18 20:26:44 -0700 | [diff] [blame] | 179 | int write = !!(flags & GUP_FLAGS_WRITE); | 
|  | 180 | int force = !!(flags & GUP_FLAGS_FORCE); | 
|  | 181 | int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS); | 
| David Howells | 7b4d5b8 | 2006-09-27 01:50:18 -0700 | [diff] [blame] | 182 |  | 
|  | 183 | /* calculate required read or write permissions. | 
|  | 184 | * - if 'force' is set, we only require the "MAY" flags. | 
|  | 185 | */ | 
|  | 186 | vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); | 
|  | 187 | vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 188 |  | 
|  | 189 | for (i = 0; i < len; i++) { | 
| Sonic Zhang | 910e46d | 2006-09-27 01:50:17 -0700 | [diff] [blame] | 190 | vma = find_vma(mm, start); | 
| David Howells | 7b4d5b8 | 2006-09-27 01:50:18 -0700 | [diff] [blame] | 191 | if (!vma) | 
|  | 192 | goto finish_or_fault; | 
|  | 193 |  | 
|  | 194 | /* protect what we can, including chardevs */ | 
|  | 195 | if (vma->vm_flags & (VM_IO | VM_PFNMAP) || | 
| Nick Piggin | b291f00 | 2008-10-18 20:26:44 -0700 | [diff] [blame] | 196 | (!ignore && !(vm_flags & vma->vm_flags))) | 
| David Howells | 7b4d5b8 | 2006-09-27 01:50:18 -0700 | [diff] [blame] | 197 | goto finish_or_fault; | 
| Sonic Zhang | 910e46d | 2006-09-27 01:50:17 -0700 | [diff] [blame] | 198 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 199 | if (pages) { | 
|  | 200 | pages[i] = virt_to_page(start); | 
|  | 201 | if (pages[i]) | 
|  | 202 | page_cache_get(pages[i]); | 
|  | 203 | } | 
|  | 204 | if (vmas) | 
| Sonic Zhang | 910e46d | 2006-09-27 01:50:17 -0700 | [diff] [blame] | 205 | vmas[i] = vma; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 206 | start += PAGE_SIZE; | 
|  | 207 | } | 
| David Howells | 7b4d5b8 | 2006-09-27 01:50:18 -0700 | [diff] [blame] | 208 |  | 
|  | 209 | return i; | 
|  | 210 |  | 
|  | 211 | finish_or_fault: | 
|  | 212 | return i ? : -EFAULT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 213 | } | 
| Nick Piggin | b291f00 | 2008-10-18 20:26:44 -0700 | [diff] [blame] | 214 |  | 
|  | 215 |  | 
|  | 216 | /* | 
|  | 217 | * get a list of pages in an address range belonging to the specified process | 
|  | 218 | * and indicate the VMA that covers each page | 
|  | 219 | * - this is potentially dodgy as we may end incrementing the page count of a | 
|  | 220 | *   slab page or a secondary page from a compound page | 
|  | 221 | * - don't permit access to VMAs that don't support it, such as I/O mappings | 
|  | 222 | */ | 
|  | 223 | int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | 
|  | 224 | unsigned long start, int len, int write, int force, | 
|  | 225 | struct page **pages, struct vm_area_struct **vmas) | 
|  | 226 | { | 
|  | 227 | int flags = 0; | 
|  | 228 |  | 
|  | 229 | if (write) | 
|  | 230 | flags |= GUP_FLAGS_WRITE; | 
|  | 231 | if (force) | 
|  | 232 | flags |= GUP_FLAGS_FORCE; | 
|  | 233 |  | 
|  | 234 | return __get_user_pages(tsk, mm, | 
|  | 235 | start, len, flags, | 
|  | 236 | pages, vmas); | 
|  | 237 | } | 
| Greg Ungerer | 66aa2b4 | 2005-09-12 11:18:10 +1000 | [diff] [blame] | 238 | EXPORT_SYMBOL(get_user_pages); | 
|  | 239 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 240 | DEFINE_RWLOCK(vmlist_lock); | 
|  | 241 | struct vm_struct *vmlist; | 
|  | 242 |  | 
| Christoph Lameter | b3bdda0 | 2008-02-04 22:28:32 -0800 | [diff] [blame] | 243 | void vfree(const void *addr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 244 | { | 
|  | 245 | kfree(addr); | 
|  | 246 | } | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 247 | EXPORT_SYMBOL(vfree); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 248 |  | 
| Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 249 | void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 250 | { | 
|  | 251 | /* | 
| Robert P. J. Day | 8518609 | 2007-10-19 23:11:38 +0200 | [diff] [blame] | 252 | *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc() | 
|  | 253 | * returns only a logical address. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 254 | */ | 
| Nick Piggin | 8409751 | 2006-03-22 00:08:34 -0800 | [diff] [blame] | 255 | return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 256 | } | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 257 | EXPORT_SYMBOL(__vmalloc); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 258 |  | 
| Paul Mundt | f905bc4 | 2008-02-04 22:29:59 -0800 | [diff] [blame] | 259 | void *vmalloc_user(unsigned long size) | 
|  | 260 | { | 
|  | 261 | void *ret; | 
|  | 262 |  | 
|  | 263 | ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | 
|  | 264 | PAGE_KERNEL); | 
|  | 265 | if (ret) { | 
|  | 266 | struct vm_area_struct *vma; | 
|  | 267 |  | 
|  | 268 | down_write(¤t->mm->mmap_sem); | 
|  | 269 | vma = find_vma(current->mm, (unsigned long)ret); | 
|  | 270 | if (vma) | 
|  | 271 | vma->vm_flags |= VM_USERMAP; | 
|  | 272 | up_write(¤t->mm->mmap_sem); | 
|  | 273 | } | 
|  | 274 |  | 
|  | 275 | return ret; | 
|  | 276 | } | 
|  | 277 | EXPORT_SYMBOL(vmalloc_user); | 
|  | 278 |  | 
| Christoph Lameter | b3bdda0 | 2008-02-04 22:28:32 -0800 | [diff] [blame] | 279 | struct page *vmalloc_to_page(const void *addr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 280 | { | 
|  | 281 | return virt_to_page(addr); | 
|  | 282 | } | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 283 | EXPORT_SYMBOL(vmalloc_to_page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 284 |  | 
| Christoph Lameter | b3bdda0 | 2008-02-04 22:28:32 -0800 | [diff] [blame] | 285 | unsigned long vmalloc_to_pfn(const void *addr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 286 | { | 
|  | 287 | return page_to_pfn(virt_to_page(addr)); | 
|  | 288 | } | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 289 | EXPORT_SYMBOL(vmalloc_to_pfn); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 290 |  | 
|  | 291 | long vread(char *buf, char *addr, unsigned long count) | 
|  | 292 | { | 
|  | 293 | memcpy(buf, addr, count); | 
|  | 294 | return count; | 
|  | 295 | } | 
|  | 296 |  | 
|  | 297 | long vwrite(char *buf, char *addr, unsigned long count) | 
|  | 298 | { | 
|  | 299 | /* Don't allow overflow */ | 
|  | 300 | if ((unsigned long) addr + count < count) | 
|  | 301 | count = -(unsigned long) addr; | 
|  | 302 |  | 
|  | 303 | memcpy(addr, buf, count); | 
|  | 304 | return(count); | 
|  | 305 | } | 
|  | 306 |  | 
|  | 307 | /* | 
|  | 308 | *	vmalloc  -  allocate virtually continguos memory | 
|  | 309 | * | 
|  | 310 | *	@size:		allocation size | 
|  | 311 | * | 
|  | 312 | *	Allocate enough pages to cover @size from the page level | 
|  | 313 | *	allocator and map them into continguos kernel virtual space. | 
|  | 314 | * | 
| Michael Opdenacker | c1c8897 | 2006-10-03 23:21:02 +0200 | [diff] [blame] | 315 | *	For tight control over page level allocator and protection flags | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 316 | *	use __vmalloc() instead. | 
|  | 317 | */ | 
|  | 318 | void *vmalloc(unsigned long size) | 
|  | 319 | { | 
|  | 320 | return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); | 
|  | 321 | } | 
| Andrew Morton | f613888 | 2006-02-28 16:59:18 -0800 | [diff] [blame] | 322 | EXPORT_SYMBOL(vmalloc); | 
|  | 323 |  | 
|  | 324 | void *vmalloc_node(unsigned long size, int node) | 
|  | 325 | { | 
|  | 326 | return vmalloc(size); | 
|  | 327 | } | 
|  | 328 | EXPORT_SYMBOL(vmalloc_node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 329 |  | 
| Paul Mundt | 1af446e | 2008-08-04 16:01:47 +0900 | [diff] [blame] | 330 | #ifndef PAGE_KERNEL_EXEC | 
|  | 331 | # define PAGE_KERNEL_EXEC PAGE_KERNEL | 
|  | 332 | #endif | 
|  | 333 |  | 
|  | 334 | /** | 
|  | 335 | *	vmalloc_exec  -  allocate virtually contiguous, executable memory | 
|  | 336 | *	@size:		allocation size | 
|  | 337 | * | 
|  | 338 | *	Kernel-internal function to allocate enough pages to cover @size | 
|  | 339 | *	the page level allocator and map them into contiguous and | 
|  | 340 | *	executable kernel virtual space. | 
|  | 341 | * | 
|  | 342 | *	For tight control over page level allocator and protection flags | 
|  | 343 | *	use __vmalloc() instead. | 
|  | 344 | */ | 
|  | 345 |  | 
|  | 346 | void *vmalloc_exec(unsigned long size) | 
|  | 347 | { | 
|  | 348 | return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC); | 
|  | 349 | } | 
|  | 350 |  | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 351 | /** | 
|  | 352 | * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 353 | *	@size:		allocation size | 
|  | 354 | * | 
|  | 355 | *	Allocate enough 32bit PA addressable pages to cover @size from the | 
|  | 356 | *	page level allocator and map them into continguos kernel virtual space. | 
|  | 357 | */ | 
|  | 358 | void *vmalloc_32(unsigned long size) | 
|  | 359 | { | 
|  | 360 | return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL); | 
|  | 361 | } | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 362 | EXPORT_SYMBOL(vmalloc_32); | 
|  | 363 |  | 
|  | 364 | /** | 
|  | 365 | * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory | 
|  | 366 | *	@size:		allocation size | 
|  | 367 | * | 
|  | 368 | * The resulting memory area is 32bit addressable and zeroed so it can be | 
|  | 369 | * mapped to userspace without leaking data. | 
| Paul Mundt | f905bc4 | 2008-02-04 22:29:59 -0800 | [diff] [blame] | 370 | * | 
|  | 371 | * VM_USERMAP is set on the corresponding VMA so that subsequent calls to | 
|  | 372 | * remap_vmalloc_range() are permissible. | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 373 | */ | 
|  | 374 | void *vmalloc_32_user(unsigned long size) | 
|  | 375 | { | 
| Paul Mundt | f905bc4 | 2008-02-04 22:29:59 -0800 | [diff] [blame] | 376 | /* | 
|  | 377 | * We'll have to sort out the ZONE_DMA bits for 64-bit, | 
|  | 378 | * but for now this can simply use vmalloc_user() directly. | 
|  | 379 | */ | 
|  | 380 | return vmalloc_user(size); | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 381 | } | 
|  | 382 | EXPORT_SYMBOL(vmalloc_32_user); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 383 |  | 
|  | 384 | void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) | 
|  | 385 | { | 
|  | 386 | BUG(); | 
|  | 387 | return NULL; | 
|  | 388 | } | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 389 | EXPORT_SYMBOL(vmap); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 390 |  | 
| Christoph Lameter | b3bdda0 | 2008-02-04 22:28:32 -0800 | [diff] [blame] | 391 | void vunmap(const void *addr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 392 | { | 
|  | 393 | BUG(); | 
|  | 394 | } | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 395 | EXPORT_SYMBOL(vunmap); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 396 |  | 
| Paul Mundt | eb6434d | 2009-01-21 17:45:47 +0900 | [diff] [blame] | 397 | void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) | 
|  | 398 | { | 
|  | 399 | BUG(); | 
|  | 400 | return NULL; | 
|  | 401 | } | 
|  | 402 | EXPORT_SYMBOL(vm_map_ram); | 
|  | 403 |  | 
|  | 404 | void vm_unmap_ram(const void *mem, unsigned int count) | 
|  | 405 | { | 
|  | 406 | BUG(); | 
|  | 407 | } | 
|  | 408 | EXPORT_SYMBOL(vm_unmap_ram); | 
|  | 409 |  | 
|  | 410 | void vm_unmap_aliases(void) | 
|  | 411 | { | 
|  | 412 | } | 
|  | 413 | EXPORT_SYMBOL_GPL(vm_unmap_aliases); | 
|  | 414 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 415 | /* | 
| Christoph Hellwig | 1eeb66a | 2007-05-08 00:27:03 -0700 | [diff] [blame] | 416 | * Implement a stub for vmalloc_sync_all() if the architecture chose not to | 
|  | 417 | * have one. | 
|  | 418 | */ | 
|  | 419 | void  __attribute__((weak)) vmalloc_sync_all(void) | 
|  | 420 | { | 
|  | 421 | } | 
|  | 422 |  | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 423 | int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, | 
|  | 424 | struct page *page) | 
|  | 425 | { | 
|  | 426 | return -EINVAL; | 
|  | 427 | } | 
|  | 428 | EXPORT_SYMBOL(vm_insert_page); | 
|  | 429 |  | 
| Christoph Hellwig | 1eeb66a | 2007-05-08 00:27:03 -0700 | [diff] [blame] | 430 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 431 | *  sys_brk() for the most part doesn't need the global kernel | 
|  | 432 | *  lock, except when an application is doing something nasty | 
|  | 433 | *  like trying to un-brk an area that has already been mapped | 
|  | 434 | *  to a regular file.  in this case, the unmapping will need | 
|  | 435 | *  to invoke file system routines that need the global lock. | 
|  | 436 | */ | 
| Heiko Carstens | 6a6160a | 2009-01-14 14:14:15 +0100 | [diff] [blame] | 437 | SYSCALL_DEFINE1(brk, unsigned long, brk) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 438 | { | 
|  | 439 | struct mm_struct *mm = current->mm; | 
|  | 440 |  | 
|  | 441 | if (brk < mm->start_brk || brk > mm->context.end_brk) | 
|  | 442 | return mm->brk; | 
|  | 443 |  | 
|  | 444 | if (mm->brk == brk) | 
|  | 445 | return mm->brk; | 
|  | 446 |  | 
|  | 447 | /* | 
|  | 448 | * Always allow shrinking brk | 
|  | 449 | */ | 
|  | 450 | if (brk <= mm->brk) { | 
|  | 451 | mm->brk = brk; | 
|  | 452 | return brk; | 
|  | 453 | } | 
|  | 454 |  | 
|  | 455 | /* | 
|  | 456 | * Ok, looks good - let it rip. | 
|  | 457 | */ | 
|  | 458 | return mm->brk = brk; | 
|  | 459 | } | 
|  | 460 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 461 | /* | 
|  | 462 | * initialise the VMA and region record slabs | 
|  | 463 | */ | 
|  | 464 | void __init mmap_init(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 465 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 466 | vm_region_jar = kmem_cache_create("vm_region_jar", | 
|  | 467 | sizeof(struct vm_region), 0, | 
|  | 468 | SLAB_PANIC, NULL); | 
|  | 469 | vm_area_cachep = kmem_cache_create("vm_area_struct", | 
|  | 470 | sizeof(struct vm_area_struct), 0, | 
|  | 471 | SLAB_PANIC, NULL); | 
|  | 472 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 473 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 474 | /* | 
|  | 475 | * validate the region tree | 
|  | 476 | * - the caller must hold the region lock | 
|  | 477 | */ | 
|  | 478 | #ifdef CONFIG_DEBUG_NOMMU_REGIONS | 
|  | 479 | static noinline void validate_nommu_regions(void) | 
|  | 480 | { | 
|  | 481 | struct vm_region *region, *last; | 
|  | 482 | struct rb_node *p, *lastp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 483 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 484 | lastp = rb_first(&nommu_region_tree); | 
|  | 485 | if (!lastp) | 
|  | 486 | return; | 
|  | 487 |  | 
|  | 488 | last = rb_entry(lastp, struct vm_region, vm_rb); | 
|  | 489 | if (unlikely(last->vm_end <= last->vm_start)) | 
|  | 490 | BUG(); | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 491 | if (unlikely(last->vm_top < last->vm_end)) | 
|  | 492 | BUG(); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 493 |  | 
|  | 494 | while ((p = rb_next(lastp))) { | 
|  | 495 | region = rb_entry(p, struct vm_region, vm_rb); | 
|  | 496 | last = rb_entry(lastp, struct vm_region, vm_rb); | 
|  | 497 |  | 
|  | 498 | if (unlikely(region->vm_end <= region->vm_start)) | 
|  | 499 | BUG(); | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 500 | if (unlikely(region->vm_top < region->vm_end)) | 
|  | 501 | BUG(); | 
|  | 502 | if (unlikely(region->vm_start < last->vm_top)) | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 503 | BUG(); | 
|  | 504 |  | 
|  | 505 | lastp = p; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 506 | } | 
|  | 507 | } | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 508 | #else | 
|  | 509 | #define validate_nommu_regions() do {} while(0) | 
|  | 510 | #endif | 
|  | 511 |  | 
|  | 512 | /* | 
|  | 513 | * add a region into the global tree | 
|  | 514 | */ | 
|  | 515 | static void add_nommu_region(struct vm_region *region) | 
|  | 516 | { | 
|  | 517 | struct vm_region *pregion; | 
|  | 518 | struct rb_node **p, *parent; | 
|  | 519 |  | 
|  | 520 | validate_nommu_regions(); | 
|  | 521 |  | 
|  | 522 | BUG_ON(region->vm_start & ~PAGE_MASK); | 
|  | 523 |  | 
|  | 524 | parent = NULL; | 
|  | 525 | p = &nommu_region_tree.rb_node; | 
|  | 526 | while (*p) { | 
|  | 527 | parent = *p; | 
|  | 528 | pregion = rb_entry(parent, struct vm_region, vm_rb); | 
|  | 529 | if (region->vm_start < pregion->vm_start) | 
|  | 530 | p = &(*p)->rb_left; | 
|  | 531 | else if (region->vm_start > pregion->vm_start) | 
|  | 532 | p = &(*p)->rb_right; | 
|  | 533 | else if (pregion == region) | 
|  | 534 | return; | 
|  | 535 | else | 
|  | 536 | BUG(); | 
|  | 537 | } | 
|  | 538 |  | 
|  | 539 | rb_link_node(®ion->vm_rb, parent, p); | 
|  | 540 | rb_insert_color(®ion->vm_rb, &nommu_region_tree); | 
|  | 541 |  | 
|  | 542 | validate_nommu_regions(); | 
|  | 543 | } | 
|  | 544 |  | 
|  | 545 | /* | 
|  | 546 | * delete a region from the global tree | 
|  | 547 | */ | 
|  | 548 | static void delete_nommu_region(struct vm_region *region) | 
|  | 549 | { | 
|  | 550 | BUG_ON(!nommu_region_tree.rb_node); | 
|  | 551 |  | 
|  | 552 | validate_nommu_regions(); | 
|  | 553 | rb_erase(®ion->vm_rb, &nommu_region_tree); | 
|  | 554 | validate_nommu_regions(); | 
|  | 555 | } | 
|  | 556 |  | 
|  | 557 | /* | 
|  | 558 | * free a contiguous series of pages | 
|  | 559 | */ | 
|  | 560 | static void free_page_series(unsigned long from, unsigned long to) | 
|  | 561 | { | 
|  | 562 | for (; from < to; from += PAGE_SIZE) { | 
|  | 563 | struct page *page = virt_to_page(from); | 
|  | 564 |  | 
|  | 565 | kdebug("- free %lx", from); | 
|  | 566 | atomic_dec(&mmap_pages_allocated); | 
|  | 567 | if (page_count(page) != 1) | 
|  | 568 | kdebug("free page %p [%d]", page, page_count(page)); | 
|  | 569 | put_page(page); | 
|  | 570 | } | 
|  | 571 | } | 
|  | 572 |  | 
|  | 573 | /* | 
|  | 574 | * release a reference to a region | 
|  | 575 | * - the caller must hold the region semaphore, which this releases | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 576 | * - the region may not have been added to the tree yet, in which case vm_top | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 577 | *   will equal vm_start | 
|  | 578 | */ | 
|  | 579 | static void __put_nommu_region(struct vm_region *region) | 
|  | 580 | __releases(nommu_region_sem) | 
|  | 581 | { | 
|  | 582 | kenter("%p{%d}", region, atomic_read(®ion->vm_usage)); | 
|  | 583 |  | 
|  | 584 | BUG_ON(!nommu_region_tree.rb_node); | 
|  | 585 |  | 
|  | 586 | if (atomic_dec_and_test(®ion->vm_usage)) { | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 587 | if (region->vm_top > region->vm_start) | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 588 | delete_nommu_region(region); | 
|  | 589 | up_write(&nommu_region_sem); | 
|  | 590 |  | 
|  | 591 | if (region->vm_file) | 
|  | 592 | fput(region->vm_file); | 
|  | 593 |  | 
|  | 594 | /* IO memory and memory shared directly out of the pagecache | 
|  | 595 | * from ramfs/tmpfs mustn't be released here */ | 
|  | 596 | if (region->vm_flags & VM_MAPPED_COPY) { | 
|  | 597 | kdebug("free series"); | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 598 | free_page_series(region->vm_start, region->vm_top); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 599 | } | 
|  | 600 | kmem_cache_free(vm_region_jar, region); | 
|  | 601 | } else { | 
|  | 602 | up_write(&nommu_region_sem); | 
|  | 603 | } | 
|  | 604 | } | 
|  | 605 |  | 
|  | 606 | /* | 
|  | 607 | * release a reference to a region | 
|  | 608 | */ | 
|  | 609 | static void put_nommu_region(struct vm_region *region) | 
|  | 610 | { | 
|  | 611 | down_write(&nommu_region_sem); | 
|  | 612 | __put_nommu_region(region); | 
|  | 613 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 614 |  | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 615 | /* | 
|  | 616 | * add a VMA into a process's mm_struct in the appropriate place in the list | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 617 | * and tree and add to the address space's page tree also if not an anonymous | 
|  | 618 | * page | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 619 | * - should be called with mm->mmap_sem held writelocked | 
|  | 620 | */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 621 | static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 622 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 623 | struct vm_area_struct *pvma, **pp; | 
|  | 624 | struct address_space *mapping; | 
|  | 625 | struct rb_node **p, *parent; | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 626 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 627 | kenter(",%p", vma); | 
|  | 628 |  | 
|  | 629 | BUG_ON(!vma->vm_region); | 
|  | 630 |  | 
|  | 631 | mm->map_count++; | 
|  | 632 | vma->vm_mm = mm; | 
|  | 633 |  | 
|  | 634 | /* add the VMA to the mapping */ | 
|  | 635 | if (vma->vm_file) { | 
|  | 636 | mapping = vma->vm_file->f_mapping; | 
|  | 637 |  | 
|  | 638 | flush_dcache_mmap_lock(mapping); | 
|  | 639 | vma_prio_tree_insert(vma, &mapping->i_mmap); | 
|  | 640 | flush_dcache_mmap_unlock(mapping); | 
|  | 641 | } | 
|  | 642 |  | 
|  | 643 | /* add the VMA to the tree */ | 
|  | 644 | parent = NULL; | 
|  | 645 | p = &mm->mm_rb.rb_node; | 
|  | 646 | while (*p) { | 
|  | 647 | parent = *p; | 
|  | 648 | pvma = rb_entry(parent, struct vm_area_struct, vm_rb); | 
|  | 649 |  | 
|  | 650 | /* sort by: start addr, end addr, VMA struct addr in that order | 
|  | 651 | * (the latter is necessary as we may get identical VMAs) */ | 
|  | 652 | if (vma->vm_start < pvma->vm_start) | 
|  | 653 | p = &(*p)->rb_left; | 
|  | 654 | else if (vma->vm_start > pvma->vm_start) | 
|  | 655 | p = &(*p)->rb_right; | 
|  | 656 | else if (vma->vm_end < pvma->vm_end) | 
|  | 657 | p = &(*p)->rb_left; | 
|  | 658 | else if (vma->vm_end > pvma->vm_end) | 
|  | 659 | p = &(*p)->rb_right; | 
|  | 660 | else if (vma < pvma) | 
|  | 661 | p = &(*p)->rb_left; | 
|  | 662 | else if (vma > pvma) | 
|  | 663 | p = &(*p)->rb_right; | 
|  | 664 | else | 
|  | 665 | BUG(); | 
|  | 666 | } | 
|  | 667 |  | 
|  | 668 | rb_link_node(&vma->vm_rb, parent, p); | 
|  | 669 | rb_insert_color(&vma->vm_rb, &mm->mm_rb); | 
|  | 670 |  | 
|  | 671 | /* add VMA to the VMA list also */ | 
|  | 672 | for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) { | 
|  | 673 | if (pvma->vm_start > vma->vm_start) | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 674 | break; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 675 | if (pvma->vm_start < vma->vm_start) | 
|  | 676 | continue; | 
|  | 677 | if (pvma->vm_end < vma->vm_end) | 
|  | 678 | break; | 
|  | 679 | } | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 680 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 681 | vma->vm_next = *pp; | 
|  | 682 | *pp = vma; | 
|  | 683 | } | 
|  | 684 |  | 
|  | 685 | /* | 
|  | 686 | * delete a VMA from its owning mm_struct and address space | 
|  | 687 | */ | 
|  | 688 | static void delete_vma_from_mm(struct vm_area_struct *vma) | 
|  | 689 | { | 
|  | 690 | struct vm_area_struct **pp; | 
|  | 691 | struct address_space *mapping; | 
|  | 692 | struct mm_struct *mm = vma->vm_mm; | 
|  | 693 |  | 
|  | 694 | kenter("%p", vma); | 
|  | 695 |  | 
|  | 696 | mm->map_count--; | 
|  | 697 | if (mm->mmap_cache == vma) | 
|  | 698 | mm->mmap_cache = NULL; | 
|  | 699 |  | 
|  | 700 | /* remove the VMA from the mapping */ | 
|  | 701 | if (vma->vm_file) { | 
|  | 702 | mapping = vma->vm_file->f_mapping; | 
|  | 703 |  | 
|  | 704 | flush_dcache_mmap_lock(mapping); | 
|  | 705 | vma_prio_tree_remove(vma, &mapping->i_mmap); | 
|  | 706 | flush_dcache_mmap_unlock(mapping); | 
|  | 707 | } | 
|  | 708 |  | 
|  | 709 | /* remove from the MM's tree and list */ | 
|  | 710 | rb_erase(&vma->vm_rb, &mm->mm_rb); | 
|  | 711 | for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) { | 
|  | 712 | if (*pp == vma) { | 
|  | 713 | *pp = vma->vm_next; | 
|  | 714 | break; | 
|  | 715 | } | 
|  | 716 | } | 
|  | 717 |  | 
|  | 718 | vma->vm_mm = NULL; | 
|  | 719 | } | 
|  | 720 |  | 
|  | 721 | /* | 
|  | 722 | * destroy a VMA record | 
|  | 723 | */ | 
|  | 724 | static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) | 
|  | 725 | { | 
|  | 726 | kenter("%p", vma); | 
|  | 727 | if (vma->vm_ops && vma->vm_ops->close) | 
|  | 728 | vma->vm_ops->close(vma); | 
|  | 729 | if (vma->vm_file) { | 
|  | 730 | fput(vma->vm_file); | 
|  | 731 | if (vma->vm_flags & VM_EXECUTABLE) | 
|  | 732 | removed_exe_file_vma(mm); | 
|  | 733 | } | 
|  | 734 | put_nommu_region(vma->vm_region); | 
|  | 735 | kmem_cache_free(vm_area_cachep, vma); | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 736 | } | 
|  | 737 |  | 
|  | 738 | /* | 
|  | 739 | * look up the first VMA in which addr resides, NULL if none | 
|  | 740 | * - should be called with mm->mmap_sem at least held readlocked | 
|  | 741 | */ | 
|  | 742 | struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) | 
|  | 743 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 744 | struct vm_area_struct *vma; | 
|  | 745 | struct rb_node *n = mm->mm_rb.rb_node; | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 746 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 747 | /* check the cache first */ | 
|  | 748 | vma = mm->mmap_cache; | 
|  | 749 | if (vma && vma->vm_start <= addr && vma->vm_end > addr) | 
|  | 750 | return vma; | 
|  | 751 |  | 
|  | 752 | /* trawl the tree (there may be multiple mappings in which addr | 
|  | 753 | * resides) */ | 
|  | 754 | for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { | 
|  | 755 | vma = rb_entry(n, struct vm_area_struct, vm_rb); | 
|  | 756 | if (vma->vm_start > addr) | 
|  | 757 | return NULL; | 
|  | 758 | if (vma->vm_end > addr) { | 
|  | 759 | mm->mmap_cache = vma; | 
|  | 760 | return vma; | 
|  | 761 | } | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 762 | } | 
|  | 763 |  | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 764 | return NULL; | 
|  | 765 | } | 
|  | 766 | EXPORT_SYMBOL(find_vma); | 
|  | 767 |  | 
|  | 768 | /* | 
| David Howells | 930e652 | 2006-09-27 01:50:22 -0700 | [diff] [blame] | 769 | * find a VMA | 
|  | 770 | * - we don't extend stack VMAs under NOMMU conditions | 
|  | 771 | */ | 
|  | 772 | struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) | 
|  | 773 | { | 
|  | 774 | return find_vma(mm, addr); | 
|  | 775 | } | 
|  | 776 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 777 | /* | 
|  | 778 | * expand a stack to a given address | 
|  | 779 | * - not supported under NOMMU conditions | 
|  | 780 | */ | 
| Greg Ungerer | 57c8f63 | 2007-07-15 23:38:28 -0700 | [diff] [blame] | 781 | int expand_stack(struct vm_area_struct *vma, unsigned long address) | 
|  | 782 | { | 
|  | 783 | return -ENOMEM; | 
|  | 784 | } | 
|  | 785 |  | 
| David Howells | 930e652 | 2006-09-27 01:50:22 -0700 | [diff] [blame] | 786 | /* | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 787 | * look up the first VMA exactly that exactly matches addr | 
|  | 788 | * - should be called with mm->mmap_sem at least held readlocked | 
|  | 789 | */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 790 | static struct vm_area_struct *find_vma_exact(struct mm_struct *mm, | 
|  | 791 | unsigned long addr, | 
|  | 792 | unsigned long len) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 793 | { | 
|  | 794 | struct vm_area_struct *vma; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 795 | struct rb_node *n = mm->mm_rb.rb_node; | 
|  | 796 | unsigned long end = addr + len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 797 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 798 | /* check the cache first */ | 
|  | 799 | vma = mm->mmap_cache; | 
|  | 800 | if (vma && vma->vm_start == addr && vma->vm_end == end) | 
|  | 801 | return vma; | 
|  | 802 |  | 
|  | 803 | /* trawl the tree (there may be multiple mappings in which addr | 
|  | 804 | * resides) */ | 
|  | 805 | for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 806 | vma = rb_entry(n, struct vm_area_struct, vm_rb); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 807 | if (vma->vm_start < addr) | 
|  | 808 | continue; | 
|  | 809 | if (vma->vm_start > addr) | 
|  | 810 | return NULL; | 
|  | 811 | if (vma->vm_end == end) { | 
|  | 812 | mm->mmap_cache = vma; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 813 | return vma; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 814 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 815 | } | 
|  | 816 |  | 
|  | 817 | return NULL; | 
|  | 818 | } | 
|  | 819 |  | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 820 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 821 | * determine whether a mapping should be permitted and, if so, what sort of | 
|  | 822 | * mapping we're capable of supporting | 
|  | 823 | */ | 
|  | 824 | static int validate_mmap_request(struct file *file, | 
|  | 825 | unsigned long addr, | 
|  | 826 | unsigned long len, | 
|  | 827 | unsigned long prot, | 
|  | 828 | unsigned long flags, | 
|  | 829 | unsigned long pgoff, | 
|  | 830 | unsigned long *_capabilities) | 
|  | 831 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 832 | unsigned long capabilities, rlen; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 833 | unsigned long reqprot = prot; | 
|  | 834 | int ret; | 
|  | 835 |  | 
|  | 836 | /* do the simple checks first */ | 
|  | 837 | if (flags & MAP_FIXED || addr) { | 
|  | 838 | printk(KERN_DEBUG | 
|  | 839 | "%d: Can't do fixed-address/overlay mmap of RAM\n", | 
|  | 840 | current->pid); | 
|  | 841 | return -EINVAL; | 
|  | 842 | } | 
|  | 843 |  | 
|  | 844 | if ((flags & MAP_TYPE) != MAP_PRIVATE && | 
|  | 845 | (flags & MAP_TYPE) != MAP_SHARED) | 
|  | 846 | return -EINVAL; | 
|  | 847 |  | 
| Mike Frysinger | f81cff0 | 2006-12-06 12:02:59 +1000 | [diff] [blame] | 848 | if (!len) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 849 | return -EINVAL; | 
|  | 850 |  | 
| Mike Frysinger | f81cff0 | 2006-12-06 12:02:59 +1000 | [diff] [blame] | 851 | /* Careful about overflows.. */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 852 | rlen = PAGE_ALIGN(len); | 
|  | 853 | if (!rlen || rlen > TASK_SIZE) | 
| Mike Frysinger | f81cff0 | 2006-12-06 12:02:59 +1000 | [diff] [blame] | 854 | return -ENOMEM; | 
|  | 855 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 856 | /* offset overflow? */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 857 | if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff) | 
| Mike Frysinger | f81cff0 | 2006-12-06 12:02:59 +1000 | [diff] [blame] | 858 | return -EOVERFLOW; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 859 |  | 
|  | 860 | if (file) { | 
|  | 861 | /* validate file mapping requests */ | 
|  | 862 | struct address_space *mapping; | 
|  | 863 |  | 
|  | 864 | /* files must support mmap */ | 
|  | 865 | if (!file->f_op || !file->f_op->mmap) | 
|  | 866 | return -ENODEV; | 
|  | 867 |  | 
|  | 868 | /* work out if what we've got could possibly be shared | 
|  | 869 | * - we support chardevs that provide their own "memory" | 
|  | 870 | * - we support files/blockdevs that are memory backed | 
|  | 871 | */ | 
|  | 872 | mapping = file->f_mapping; | 
|  | 873 | if (!mapping) | 
| Josef Sipek | e9536ae | 2006-12-08 02:37:21 -0800 | [diff] [blame] | 874 | mapping = file->f_path.dentry->d_inode->i_mapping; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 875 |  | 
|  | 876 | capabilities = 0; | 
|  | 877 | if (mapping && mapping->backing_dev_info) | 
|  | 878 | capabilities = mapping->backing_dev_info->capabilities; | 
|  | 879 |  | 
|  | 880 | if (!capabilities) { | 
|  | 881 | /* no explicit capabilities set, so assume some | 
|  | 882 | * defaults */ | 
| Josef Sipek | e9536ae | 2006-12-08 02:37:21 -0800 | [diff] [blame] | 883 | switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 884 | case S_IFREG: | 
|  | 885 | case S_IFBLK: | 
|  | 886 | capabilities = BDI_CAP_MAP_COPY; | 
|  | 887 | break; | 
|  | 888 |  | 
|  | 889 | case S_IFCHR: | 
|  | 890 | capabilities = | 
|  | 891 | BDI_CAP_MAP_DIRECT | | 
|  | 892 | BDI_CAP_READ_MAP | | 
|  | 893 | BDI_CAP_WRITE_MAP; | 
|  | 894 | break; | 
|  | 895 |  | 
|  | 896 | default: | 
|  | 897 | return -EINVAL; | 
|  | 898 | } | 
|  | 899 | } | 
|  | 900 |  | 
|  | 901 | /* eliminate any capabilities that we can't support on this | 
|  | 902 | * device */ | 
|  | 903 | if (!file->f_op->get_unmapped_area) | 
|  | 904 | capabilities &= ~BDI_CAP_MAP_DIRECT; | 
|  | 905 | if (!file->f_op->read) | 
|  | 906 | capabilities &= ~BDI_CAP_MAP_COPY; | 
|  | 907 |  | 
|  | 908 | if (flags & MAP_SHARED) { | 
|  | 909 | /* do checks for writing, appending and locking */ | 
|  | 910 | if ((prot & PROT_WRITE) && | 
|  | 911 | !(file->f_mode & FMODE_WRITE)) | 
|  | 912 | return -EACCES; | 
|  | 913 |  | 
| Josef Sipek | e9536ae | 2006-12-08 02:37:21 -0800 | [diff] [blame] | 914 | if (IS_APPEND(file->f_path.dentry->d_inode) && | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 915 | (file->f_mode & FMODE_WRITE)) | 
|  | 916 | return -EACCES; | 
|  | 917 |  | 
| Josef Sipek | e9536ae | 2006-12-08 02:37:21 -0800 | [diff] [blame] | 918 | if (locks_verify_locked(file->f_path.dentry->d_inode)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 919 | return -EAGAIN; | 
|  | 920 |  | 
|  | 921 | if (!(capabilities & BDI_CAP_MAP_DIRECT)) | 
|  | 922 | return -ENODEV; | 
|  | 923 |  | 
|  | 924 | if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  || | 
|  | 925 | ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) || | 
|  | 926 | ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP)) | 
|  | 927 | ) { | 
|  | 928 | printk("MAP_SHARED not completely supported on !MMU\n"); | 
|  | 929 | return -EINVAL; | 
|  | 930 | } | 
|  | 931 |  | 
|  | 932 | /* we mustn't privatise shared mappings */ | 
|  | 933 | capabilities &= ~BDI_CAP_MAP_COPY; | 
|  | 934 | } | 
|  | 935 | else { | 
|  | 936 | /* we're going to read the file into private memory we | 
|  | 937 | * allocate */ | 
|  | 938 | if (!(capabilities & BDI_CAP_MAP_COPY)) | 
|  | 939 | return -ENODEV; | 
|  | 940 |  | 
|  | 941 | /* we don't permit a private writable mapping to be | 
|  | 942 | * shared with the backing device */ | 
|  | 943 | if (prot & PROT_WRITE) | 
|  | 944 | capabilities &= ~BDI_CAP_MAP_DIRECT; | 
|  | 945 | } | 
|  | 946 |  | 
|  | 947 | /* handle executable mappings and implied executable | 
|  | 948 | * mappings */ | 
| Josef Sipek | e9536ae | 2006-12-08 02:37:21 -0800 | [diff] [blame] | 949 | if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 950 | if (prot & PROT_EXEC) | 
|  | 951 | return -EPERM; | 
|  | 952 | } | 
|  | 953 | else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) { | 
|  | 954 | /* handle implication of PROT_EXEC by PROT_READ */ | 
|  | 955 | if (current->personality & READ_IMPLIES_EXEC) { | 
|  | 956 | if (capabilities & BDI_CAP_EXEC_MAP) | 
|  | 957 | prot |= PROT_EXEC; | 
|  | 958 | } | 
|  | 959 | } | 
|  | 960 | else if ((prot & PROT_READ) && | 
|  | 961 | (prot & PROT_EXEC) && | 
|  | 962 | !(capabilities & BDI_CAP_EXEC_MAP) | 
|  | 963 | ) { | 
|  | 964 | /* backing file is not executable, try to copy */ | 
|  | 965 | capabilities &= ~BDI_CAP_MAP_DIRECT; | 
|  | 966 | } | 
|  | 967 | } | 
|  | 968 | else { | 
|  | 969 | /* anonymous mappings are always memory backed and can be | 
|  | 970 | * privately mapped | 
|  | 971 | */ | 
|  | 972 | capabilities = BDI_CAP_MAP_COPY; | 
|  | 973 |  | 
|  | 974 | /* handle PROT_EXEC implication by PROT_READ */ | 
|  | 975 | if ((prot & PROT_READ) && | 
|  | 976 | (current->personality & READ_IMPLIES_EXEC)) | 
|  | 977 | prot |= PROT_EXEC; | 
|  | 978 | } | 
|  | 979 |  | 
|  | 980 | /* allow the security API to have its say */ | 
| Eric Paris | ed03218 | 2007-06-28 15:55:21 -0400 | [diff] [blame] | 981 | ret = security_file_mmap(file, reqprot, prot, flags, addr, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 982 | if (ret < 0) | 
|  | 983 | return ret; | 
|  | 984 |  | 
|  | 985 | /* looks okay */ | 
|  | 986 | *_capabilities = capabilities; | 
|  | 987 | return 0; | 
|  | 988 | } | 
|  | 989 |  | 
|  | 990 | /* | 
|  | 991 | * we've determined that we can make the mapping, now translate what we | 
|  | 992 | * now know into VMA flags | 
|  | 993 | */ | 
|  | 994 | static unsigned long determine_vm_flags(struct file *file, | 
|  | 995 | unsigned long prot, | 
|  | 996 | unsigned long flags, | 
|  | 997 | unsigned long capabilities) | 
|  | 998 | { | 
|  | 999 | unsigned long vm_flags; | 
|  | 1000 |  | 
|  | 1001 | vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags); | 
|  | 1002 | vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; | 
|  | 1003 | /* vm_flags |= mm->def_flags; */ | 
|  | 1004 |  | 
|  | 1005 | if (!(capabilities & BDI_CAP_MAP_DIRECT)) { | 
|  | 1006 | /* attempt to share read-only copies of mapped file chunks */ | 
|  | 1007 | if (file && !(prot & PROT_WRITE)) | 
|  | 1008 | vm_flags |= VM_MAYSHARE; | 
|  | 1009 | } | 
|  | 1010 | else { | 
|  | 1011 | /* overlay a shareable mapping on the backing device or inode | 
|  | 1012 | * if possible - used for chardevs, ramfs/tmpfs/shmfs and | 
|  | 1013 | * romfs/cramfs */ | 
|  | 1014 | if (flags & MAP_SHARED) | 
|  | 1015 | vm_flags |= VM_MAYSHARE | VM_SHARED; | 
|  | 1016 | else if ((((vm_flags & capabilities) ^ vm_flags) & BDI_CAP_VMFLAGS) == 0) | 
|  | 1017 | vm_flags |= VM_MAYSHARE; | 
|  | 1018 | } | 
|  | 1019 |  | 
|  | 1020 | /* refuse to let anyone share private mappings with this process if | 
|  | 1021 | * it's being traced - otherwise breakpoints set in it may interfere | 
|  | 1022 | * with another untraced process | 
|  | 1023 | */ | 
| Roland McGrath | fa8e26c | 2008-07-25 19:45:50 -0700 | [diff] [blame] | 1024 | if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1025 | vm_flags &= ~VM_MAYSHARE; | 
|  | 1026 |  | 
|  | 1027 | return vm_flags; | 
|  | 1028 | } | 
|  | 1029 |  | 
|  | 1030 | /* | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1031 | * set up a shared mapping on a file (the driver or filesystem provides and | 
|  | 1032 | * pins the storage) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1033 | */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1034 | static int do_mmap_shared_file(struct vm_area_struct *vma) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1035 | { | 
|  | 1036 | int ret; | 
|  | 1037 |  | 
|  | 1038 | ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1039 | if (ret == 0) { | 
|  | 1040 | vma->vm_region->vm_top = vma->vm_region->vm_end; | 
|  | 1041 | return ret; | 
|  | 1042 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1043 | if (ret != -ENOSYS) | 
|  | 1044 | return ret; | 
|  | 1045 |  | 
|  | 1046 | /* getting an ENOSYS error indicates that direct mmap isn't | 
|  | 1047 | * possible (as opposed to tried but failed) so we'll fall | 
|  | 1048 | * through to making a private copy of the data and mapping | 
|  | 1049 | * that if we can */ | 
|  | 1050 | return -ENODEV; | 
|  | 1051 | } | 
|  | 1052 |  | 
|  | 1053 | /* | 
|  | 1054 | * set up a private mapping or an anonymous shared mapping | 
|  | 1055 | */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1056 | static int do_mmap_private(struct vm_area_struct *vma, | 
|  | 1057 | struct vm_region *region, | 
|  | 1058 | unsigned long len) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1059 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1060 | struct page *pages; | 
|  | 1061 | unsigned long total, point, n, rlen; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1062 | void *base; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1063 | int ret, order; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 |  | 
|  | 1065 | /* invoke the file's mapping function so that it can keep track of | 
|  | 1066 | * shared mappings on devices or memory | 
|  | 1067 | * - VM_MAYSHARE will be set if it may attempt to share | 
|  | 1068 | */ | 
|  | 1069 | if (vma->vm_file) { | 
|  | 1070 | ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1071 | if (ret == 0) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1072 | /* shouldn't return success if we're not sharing */ | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1073 | BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); | 
|  | 1074 | vma->vm_region->vm_top = vma->vm_region->vm_end; | 
|  | 1075 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1076 | } | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1077 | if (ret != -ENOSYS) | 
|  | 1078 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1079 |  | 
|  | 1080 | /* getting an ENOSYS error indicates that direct mmap isn't | 
|  | 1081 | * possible (as opposed to tried but failed) so we'll try to | 
|  | 1082 | * make a private copy of the data and map that instead */ | 
|  | 1083 | } | 
|  | 1084 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1085 | rlen = PAGE_ALIGN(len); | 
|  | 1086 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1087 | /* allocate some memory to hold the mapping | 
|  | 1088 | * - note that this may not return a page-aligned address if the object | 
|  | 1089 | *   we're allocating is smaller than a page | 
|  | 1090 | */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1091 | order = get_order(rlen); | 
|  | 1092 | kdebug("alloc order %d for %lx", order, len); | 
|  | 1093 |  | 
|  | 1094 | pages = alloc_pages(GFP_KERNEL, order); | 
|  | 1095 | if (!pages) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1096 | goto enomem; | 
|  | 1097 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1098 | total = 1 << order; | 
|  | 1099 | atomic_add(total, &mmap_pages_allocated); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1100 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1101 | point = rlen >> PAGE_SHIFT; | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1102 |  | 
|  | 1103 | /* we allocated a power-of-2 sized page set, so we may want to trim off | 
|  | 1104 | * the excess */ | 
|  | 1105 | if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) { | 
|  | 1106 | while (total > point) { | 
|  | 1107 | order = ilog2(total - point); | 
|  | 1108 | n = 1 << order; | 
|  | 1109 | kdebug("shave %lu/%lu @%lu", n, total - point, total); | 
|  | 1110 | atomic_sub(n, &mmap_pages_allocated); | 
|  | 1111 | total -= n; | 
|  | 1112 | set_page_refcounted(pages + total); | 
|  | 1113 | __free_pages(pages + total, order); | 
|  | 1114 | } | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1115 | } | 
|  | 1116 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1117 | for (point = 1; point < total; point++) | 
|  | 1118 | set_page_refcounted(&pages[point]); | 
|  | 1119 |  | 
|  | 1120 | base = page_address(pages); | 
|  | 1121 | region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; | 
|  | 1122 | region->vm_start = (unsigned long) base; | 
|  | 1123 | region->vm_end   = region->vm_start + rlen; | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1124 | region->vm_top   = region->vm_start + (total << PAGE_SHIFT); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1125 |  | 
|  | 1126 | vma->vm_start = region->vm_start; | 
|  | 1127 | vma->vm_end   = region->vm_start + len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1128 |  | 
|  | 1129 | if (vma->vm_file) { | 
|  | 1130 | /* read the contents of a file into the copy */ | 
|  | 1131 | mm_segment_t old_fs; | 
|  | 1132 | loff_t fpos; | 
|  | 1133 |  | 
|  | 1134 | fpos = vma->vm_pgoff; | 
|  | 1135 | fpos <<= PAGE_SHIFT; | 
|  | 1136 |  | 
|  | 1137 | old_fs = get_fs(); | 
|  | 1138 | set_fs(KERNEL_DS); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1139 | ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1140 | set_fs(old_fs); | 
|  | 1141 |  | 
|  | 1142 | if (ret < 0) | 
|  | 1143 | goto error_free; | 
|  | 1144 |  | 
|  | 1145 | /* clear the last little bit */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1146 | if (ret < rlen) | 
|  | 1147 | memset(base + ret, 0, rlen - ret); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1148 |  | 
|  | 1149 | } else { | 
|  | 1150 | /* if it's an anonymous mapping, then just clear it */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1151 | memset(base, 0, rlen); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1152 | } | 
|  | 1153 |  | 
|  | 1154 | return 0; | 
|  | 1155 |  | 
|  | 1156 | error_free: | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1157 | free_page_series(region->vm_start, region->vm_end); | 
|  | 1158 | region->vm_start = vma->vm_start = 0; | 
|  | 1159 | region->vm_end   = vma->vm_end = 0; | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1160 | region->vm_top   = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1161 | return ret; | 
|  | 1162 |  | 
|  | 1163 | enomem: | 
|  | 1164 | printk("Allocation of length %lu from process %d failed\n", | 
|  | 1165 | len, current->pid); | 
|  | 1166 | show_free_areas(); | 
|  | 1167 | return -ENOMEM; | 
|  | 1168 | } | 
|  | 1169 |  | 
|  | 1170 | /* | 
|  | 1171 | * handle mapping creation for uClinux | 
|  | 1172 | */ | 
|  | 1173 | unsigned long do_mmap_pgoff(struct file *file, | 
|  | 1174 | unsigned long addr, | 
|  | 1175 | unsigned long len, | 
|  | 1176 | unsigned long prot, | 
|  | 1177 | unsigned long flags, | 
|  | 1178 | unsigned long pgoff) | 
|  | 1179 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1180 | struct vm_area_struct *vma; | 
|  | 1181 | struct vm_region *region; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1182 | struct rb_node *rb; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1183 | unsigned long capabilities, vm_flags, result; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1184 | int ret; | 
|  | 1185 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1186 | kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff); | 
|  | 1187 |  | 
| Eric Paris | 7cd9414 | 2007-11-26 18:47:40 -0500 | [diff] [blame] | 1188 | if (!(flags & MAP_FIXED)) | 
|  | 1189 | addr = round_hint_to_min(addr); | 
|  | 1190 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 | /* decide whether we should attempt the mapping, and if so what sort of | 
|  | 1192 | * mapping */ | 
|  | 1193 | ret = validate_mmap_request(file, addr, len, prot, flags, pgoff, | 
|  | 1194 | &capabilities); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1195 | if (ret < 0) { | 
|  | 1196 | kleave(" = %d [val]", ret); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | return ret; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1198 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1199 |  | 
|  | 1200 | /* we've determined that we can make the mapping, now translate what we | 
|  | 1201 | * now know into VMA flags */ | 
|  | 1202 | vm_flags = determine_vm_flags(file, prot, flags, capabilities); | 
|  | 1203 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1204 | /* we're going to need to record the mapping */ | 
|  | 1205 | region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); | 
|  | 1206 | if (!region) | 
|  | 1207 | goto error_getting_region; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1208 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1209 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | 
|  | 1210 | if (!vma) | 
|  | 1211 | goto error_getting_vma; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1212 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1213 | atomic_set(®ion->vm_usage, 1); | 
|  | 1214 | region->vm_flags = vm_flags; | 
|  | 1215 | region->vm_pgoff = pgoff; | 
|  | 1216 |  | 
|  | 1217 | INIT_LIST_HEAD(&vma->anon_vma_node); | 
|  | 1218 | vma->vm_flags = vm_flags; | 
|  | 1219 | vma->vm_pgoff = pgoff; | 
|  | 1220 |  | 
|  | 1221 | if (file) { | 
|  | 1222 | region->vm_file = file; | 
|  | 1223 | get_file(file); | 
|  | 1224 | vma->vm_file = file; | 
|  | 1225 | get_file(file); | 
|  | 1226 | if (vm_flags & VM_EXECUTABLE) { | 
|  | 1227 | added_exe_file_vma(current->mm); | 
|  | 1228 | vma->vm_mm = current->mm; | 
|  | 1229 | } | 
|  | 1230 | } | 
|  | 1231 |  | 
|  | 1232 | down_write(&nommu_region_sem); | 
|  | 1233 |  | 
|  | 1234 | /* if we want to share, we need to check for regions created by other | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1235 | * mmap() calls that overlap with our proposed mapping | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1236 | * - we can only share with a superset match on most regular files | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 | * - shared mappings on character devices and memory backed files are | 
|  | 1238 | *   permitted to overlap inexactly as far as we are concerned for in | 
|  | 1239 | *   these cases, sharing is handled in the driver or filesystem rather | 
|  | 1240 | *   than here | 
|  | 1241 | */ | 
|  | 1242 | if (vm_flags & VM_MAYSHARE) { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1243 | struct vm_region *pregion; | 
|  | 1244 | unsigned long pglen, rpglen, pgend, rpgend, start; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1245 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1246 | pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 
|  | 1247 | pgend = pgoff + pglen; | 
| David Howells | 165b239 | 2007-03-22 00:11:24 -0800 | [diff] [blame] | 1248 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1249 | for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) { | 
|  | 1250 | pregion = rb_entry(rb, struct vm_region, vm_rb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1251 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1252 | if (!(pregion->vm_flags & VM_MAYSHARE)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1253 | continue; | 
|  | 1254 |  | 
|  | 1255 | /* search for overlapping mappings on the same file */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1256 | if (pregion->vm_file->f_path.dentry->d_inode != | 
|  | 1257 | file->f_path.dentry->d_inode) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1258 | continue; | 
|  | 1259 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1260 | if (pregion->vm_pgoff >= pgend) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1261 | continue; | 
|  | 1262 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1263 | rpglen = pregion->vm_end - pregion->vm_start; | 
|  | 1264 | rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT; | 
|  | 1265 | rpgend = pregion->vm_pgoff + rpglen; | 
|  | 1266 | if (pgoff >= rpgend) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1267 | continue; | 
|  | 1268 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1269 | /* handle inexactly overlapping matches between | 
|  | 1270 | * mappings */ | 
|  | 1271 | if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && | 
|  | 1272 | !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { | 
|  | 1273 | /* new mapping is not a subset of the region */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1274 | if (!(capabilities & BDI_CAP_MAP_DIRECT)) | 
|  | 1275 | goto sharing_violation; | 
|  | 1276 | continue; | 
|  | 1277 | } | 
|  | 1278 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1279 | /* we've found a region we can share */ | 
|  | 1280 | atomic_inc(&pregion->vm_usage); | 
|  | 1281 | vma->vm_region = pregion; | 
|  | 1282 | start = pregion->vm_start; | 
|  | 1283 | start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; | 
|  | 1284 | vma->vm_start = start; | 
|  | 1285 | vma->vm_end = start + len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1286 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1287 | if (pregion->vm_flags & VM_MAPPED_COPY) { | 
|  | 1288 | kdebug("share copy"); | 
|  | 1289 | vma->vm_flags |= VM_MAPPED_COPY; | 
|  | 1290 | } else { | 
|  | 1291 | kdebug("share mmap"); | 
|  | 1292 | ret = do_mmap_shared_file(vma); | 
|  | 1293 | if (ret < 0) { | 
|  | 1294 | vma->vm_region = NULL; | 
|  | 1295 | vma->vm_start = 0; | 
|  | 1296 | vma->vm_end = 0; | 
|  | 1297 | atomic_dec(&pregion->vm_usage); | 
|  | 1298 | pregion = NULL; | 
|  | 1299 | goto error_just_free; | 
|  | 1300 | } | 
|  | 1301 | } | 
|  | 1302 | fput(region->vm_file); | 
|  | 1303 | kmem_cache_free(vm_region_jar, region); | 
|  | 1304 | region = pregion; | 
|  | 1305 | result = start; | 
|  | 1306 | goto share; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1307 | } | 
|  | 1308 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1309 | /* obtain the address at which to make a shared mapping | 
|  | 1310 | * - this is the hook for quasi-memory character devices to | 
|  | 1311 | *   tell us the location of a shared mapping | 
|  | 1312 | */ | 
|  | 1313 | if (file && file->f_op->get_unmapped_area) { | 
|  | 1314 | addr = file->f_op->get_unmapped_area(file, addr, len, | 
|  | 1315 | pgoff, flags); | 
|  | 1316 | if (IS_ERR((void *) addr)) { | 
|  | 1317 | ret = addr; | 
|  | 1318 | if (ret != (unsigned long) -ENOSYS) | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1319 | goto error_just_free; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1320 |  | 
|  | 1321 | /* the driver refused to tell us where to site | 
|  | 1322 | * the mapping so we'll have to attempt to copy | 
|  | 1323 | * it */ | 
|  | 1324 | ret = (unsigned long) -ENODEV; | 
|  | 1325 | if (!(capabilities & BDI_CAP_MAP_COPY)) | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1326 | goto error_just_free; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1327 |  | 
|  | 1328 | capabilities &= ~BDI_CAP_MAP_DIRECT; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1329 | } else { | 
|  | 1330 | vma->vm_start = region->vm_start = addr; | 
|  | 1331 | vma->vm_end = region->vm_end = addr + len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1332 | } | 
|  | 1333 | } | 
|  | 1334 | } | 
|  | 1335 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1336 | vma->vm_region = region; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1337 |  | 
|  | 1338 | /* set up the mapping */ | 
|  | 1339 | if (file && vma->vm_flags & VM_SHARED) | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1340 | ret = do_mmap_shared_file(vma); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1341 | else | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1342 | ret = do_mmap_private(vma, region, len); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1343 | if (ret < 0) | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1344 | goto error_put_region; | 
|  | 1345 |  | 
|  | 1346 | add_nommu_region(region); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1347 |  | 
|  | 1348 | /* okay... we have a mapping; now we have to register it */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1349 | result = vma->vm_start; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1350 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1351 | current->mm->total_vm += len >> PAGE_SHIFT; | 
|  | 1352 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1353 | share: | 
|  | 1354 | add_vma_to_mm(current->mm, vma); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1355 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1356 | up_write(&nommu_region_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1357 |  | 
|  | 1358 | if (prot & PROT_EXEC) | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1359 | flush_icache_range(result, result + len); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1360 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1361 | kleave(" = %lx", result); | 
|  | 1362 | return result; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1363 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1364 | error_put_region: | 
|  | 1365 | __put_nommu_region(region); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1366 | if (vma) { | 
| Matt Helsley | 925d1c4 | 2008-04-29 01:01:36 -0700 | [diff] [blame] | 1367 | if (vma->vm_file) { | 
| Gavin Lambert | 3fcd03e | 2006-09-30 23:27:01 -0700 | [diff] [blame] | 1368 | fput(vma->vm_file); | 
| Matt Helsley | 925d1c4 | 2008-04-29 01:01:36 -0700 | [diff] [blame] | 1369 | if (vma->vm_flags & VM_EXECUTABLE) | 
|  | 1370 | removed_exe_file_vma(vma->vm_mm); | 
|  | 1371 | } | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1372 | kmem_cache_free(vm_area_cachep, vma); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1373 | } | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1374 | kleave(" = %d [pr]", ret); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1375 | return ret; | 
|  | 1376 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1377 | error_just_free: | 
|  | 1378 | up_write(&nommu_region_sem); | 
|  | 1379 | error: | 
|  | 1380 | fput(region->vm_file); | 
|  | 1381 | kmem_cache_free(vm_region_jar, region); | 
|  | 1382 | fput(vma->vm_file); | 
|  | 1383 | if (vma->vm_flags & VM_EXECUTABLE) | 
|  | 1384 | removed_exe_file_vma(vma->vm_mm); | 
|  | 1385 | kmem_cache_free(vm_area_cachep, vma); | 
|  | 1386 | kleave(" = %d", ret); | 
|  | 1387 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1388 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1389 | sharing_violation: | 
|  | 1390 | up_write(&nommu_region_sem); | 
|  | 1391 | printk(KERN_WARNING "Attempt to share mismatched mappings\n"); | 
|  | 1392 | ret = -EINVAL; | 
|  | 1393 | goto error; | 
|  | 1394 |  | 
|  | 1395 | error_getting_vma: | 
|  | 1396 | kmem_cache_free(vm_region_jar, region); | 
|  | 1397 | printk(KERN_WARNING "Allocation of vma for %lu byte allocation" | 
|  | 1398 | " from process %d failed\n", | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1399 | len, current->pid); | 
|  | 1400 | show_free_areas(); | 
|  | 1401 | return -ENOMEM; | 
|  | 1402 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1403 | error_getting_region: | 
|  | 1404 | printk(KERN_WARNING "Allocation of vm region for %lu byte allocation" | 
|  | 1405 | " from process %d failed\n", | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1406 | len, current->pid); | 
|  | 1407 | show_free_areas(); | 
|  | 1408 | return -ENOMEM; | 
|  | 1409 | } | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 1410 | EXPORT_SYMBOL(do_mmap_pgoff); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1411 |  | 
|  | 1412 | /* | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1413 | * split a vma into two pieces at address 'addr', a new vma is allocated either | 
|  | 1414 | * for the first part or the tail. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1415 | */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1416 | int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, | 
|  | 1417 | unsigned long addr, int new_below) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1418 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1419 | struct vm_area_struct *new; | 
|  | 1420 | struct vm_region *region; | 
|  | 1421 | unsigned long npages; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1422 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1423 | kenter(""); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1424 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1425 | /* we're only permitted to split anonymous regions that have a single | 
|  | 1426 | * owner */ | 
|  | 1427 | if (vma->vm_file || | 
|  | 1428 | atomic_read(&vma->vm_region->vm_usage) != 1) | 
|  | 1429 | return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1430 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1431 | if (mm->map_count >= sysctl_max_map_count) | 
|  | 1432 | return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1433 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1434 | region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL); | 
|  | 1435 | if (!region) | 
|  | 1436 | return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1437 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1438 | new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); | 
|  | 1439 | if (!new) { | 
|  | 1440 | kmem_cache_free(vm_region_jar, region); | 
|  | 1441 | return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1442 | } | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1443 |  | 
|  | 1444 | /* most fields are the same, copy all, and then fixup */ | 
|  | 1445 | *new = *vma; | 
|  | 1446 | *region = *vma->vm_region; | 
|  | 1447 | new->vm_region = region; | 
|  | 1448 |  | 
|  | 1449 | npages = (addr - vma->vm_start) >> PAGE_SHIFT; | 
|  | 1450 |  | 
|  | 1451 | if (new_below) { | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1452 | region->vm_top = region->vm_end = new->vm_end = addr; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1453 | } else { | 
|  | 1454 | region->vm_start = new->vm_start = addr; | 
|  | 1455 | region->vm_pgoff = new->vm_pgoff += npages; | 
|  | 1456 | } | 
|  | 1457 |  | 
|  | 1458 | if (new->vm_ops && new->vm_ops->open) | 
|  | 1459 | new->vm_ops->open(new); | 
|  | 1460 |  | 
|  | 1461 | delete_vma_from_mm(vma); | 
|  | 1462 | down_write(&nommu_region_sem); | 
|  | 1463 | delete_nommu_region(vma->vm_region); | 
|  | 1464 | if (new_below) { | 
|  | 1465 | vma->vm_region->vm_start = vma->vm_start = addr; | 
|  | 1466 | vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; | 
|  | 1467 | } else { | 
|  | 1468 | vma->vm_region->vm_end = vma->vm_end = addr; | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1469 | vma->vm_region->vm_top = addr; | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1470 | } | 
|  | 1471 | add_nommu_region(vma->vm_region); | 
|  | 1472 | add_nommu_region(new->vm_region); | 
|  | 1473 | up_write(&nommu_region_sem); | 
|  | 1474 | add_vma_to_mm(mm, vma); | 
|  | 1475 | add_vma_to_mm(mm, new); | 
|  | 1476 | return 0; | 
|  | 1477 | } | 
|  | 1478 |  | 
|  | 1479 | /* | 
|  | 1480 | * shrink a VMA by removing the specified chunk from either the beginning or | 
|  | 1481 | * the end | 
|  | 1482 | */ | 
|  | 1483 | static int shrink_vma(struct mm_struct *mm, | 
|  | 1484 | struct vm_area_struct *vma, | 
|  | 1485 | unsigned long from, unsigned long to) | 
|  | 1486 | { | 
|  | 1487 | struct vm_region *region; | 
|  | 1488 |  | 
|  | 1489 | kenter(""); | 
|  | 1490 |  | 
|  | 1491 | /* adjust the VMA's pointers, which may reposition it in the MM's tree | 
|  | 1492 | * and list */ | 
|  | 1493 | delete_vma_from_mm(vma); | 
|  | 1494 | if (from > vma->vm_start) | 
|  | 1495 | vma->vm_end = from; | 
|  | 1496 | else | 
|  | 1497 | vma->vm_start = to; | 
|  | 1498 | add_vma_to_mm(mm, vma); | 
|  | 1499 |  | 
|  | 1500 | /* cut the backing region down to size */ | 
|  | 1501 | region = vma->vm_region; | 
|  | 1502 | BUG_ON(atomic_read(®ion->vm_usage) != 1); | 
|  | 1503 |  | 
|  | 1504 | down_write(&nommu_region_sem); | 
|  | 1505 | delete_nommu_region(region); | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1506 | if (from > region->vm_start) { | 
|  | 1507 | to = region->vm_top; | 
|  | 1508 | region->vm_top = region->vm_end = from; | 
|  | 1509 | } else { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1510 | region->vm_start = to; | 
| Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1511 | } | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1512 | add_nommu_region(region); | 
|  | 1513 | up_write(&nommu_region_sem); | 
|  | 1514 |  | 
|  | 1515 | free_page_series(from, to); | 
|  | 1516 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1517 | } | 
|  | 1518 |  | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1519 | /* | 
|  | 1520 | * release a mapping | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1521 | * - under NOMMU conditions the chunk to be unmapped must be backed by a single | 
|  | 1522 | *   VMA, though it need not cover the whole VMA | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1523 | */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1524 | int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1525 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1526 | struct vm_area_struct *vma; | 
|  | 1527 | struct rb_node *rb; | 
|  | 1528 | unsigned long end = start + len; | 
|  | 1529 | int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1530 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1531 | kenter(",%lx,%zx", start, len); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1532 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1533 | if (len == 0) | 
|  | 1534 | return -EINVAL; | 
|  | 1535 |  | 
|  | 1536 | /* find the first potentially overlapping VMA */ | 
|  | 1537 | vma = find_vma(mm, start); | 
|  | 1538 | if (!vma) { | 
|  | 1539 | printk(KERN_WARNING | 
|  | 1540 | "munmap of memory not mmapped by process %d (%s):" | 
|  | 1541 | " 0x%lx-0x%lx\n", | 
|  | 1542 | current->pid, current->comm, start, start + len - 1); | 
|  | 1543 | return -EINVAL; | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1544 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1545 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1546 | /* we're allowed to split an anonymous VMA but not a file-backed one */ | 
|  | 1547 | if (vma->vm_file) { | 
|  | 1548 | do { | 
|  | 1549 | if (start > vma->vm_start) { | 
|  | 1550 | kleave(" = -EINVAL [miss]"); | 
|  | 1551 | return -EINVAL; | 
|  | 1552 | } | 
|  | 1553 | if (end == vma->vm_end) | 
|  | 1554 | goto erase_whole_vma; | 
|  | 1555 | rb = rb_next(&vma->vm_rb); | 
|  | 1556 | vma = rb_entry(rb, struct vm_area_struct, vm_rb); | 
|  | 1557 | } while (rb); | 
|  | 1558 | kleave(" = -EINVAL [split file]"); | 
|  | 1559 | return -EINVAL; | 
|  | 1560 | } else { | 
|  | 1561 | /* the chunk must be a subset of the VMA found */ | 
|  | 1562 | if (start == vma->vm_start && end == vma->vm_end) | 
|  | 1563 | goto erase_whole_vma; | 
|  | 1564 | if (start < vma->vm_start || end > vma->vm_end) { | 
|  | 1565 | kleave(" = -EINVAL [superset]"); | 
|  | 1566 | return -EINVAL; | 
|  | 1567 | } | 
|  | 1568 | if (start & ~PAGE_MASK) { | 
|  | 1569 | kleave(" = -EINVAL [unaligned start]"); | 
|  | 1570 | return -EINVAL; | 
|  | 1571 | } | 
|  | 1572 | if (end != vma->vm_end && end & ~PAGE_MASK) { | 
|  | 1573 | kleave(" = -EINVAL [unaligned split]"); | 
|  | 1574 | return -EINVAL; | 
|  | 1575 | } | 
|  | 1576 | if (start != vma->vm_start && end != vma->vm_end) { | 
|  | 1577 | ret = split_vma(mm, vma, start, 1); | 
|  | 1578 | if (ret < 0) { | 
|  | 1579 | kleave(" = %d [split]", ret); | 
|  | 1580 | return ret; | 
|  | 1581 | } | 
|  | 1582 | } | 
|  | 1583 | return shrink_vma(mm, vma, start, end); | 
|  | 1584 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1585 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1586 | erase_whole_vma: | 
|  | 1587 | delete_vma_from_mm(vma); | 
|  | 1588 | delete_vma(mm, vma); | 
|  | 1589 | kleave(" = 0"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1590 | return 0; | 
|  | 1591 | } | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 1592 | EXPORT_SYMBOL(do_munmap); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1593 |  | 
| Heiko Carstens | 6a6160a | 2009-01-14 14:14:15 +0100 | [diff] [blame] | 1594 | SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1595 | { | 
|  | 1596 | int ret; | 
|  | 1597 | struct mm_struct *mm = current->mm; | 
|  | 1598 |  | 
|  | 1599 | down_write(&mm->mmap_sem); | 
|  | 1600 | ret = do_munmap(mm, addr, len); | 
|  | 1601 | up_write(&mm->mmap_sem); | 
|  | 1602 | return ret; | 
|  | 1603 | } | 
|  | 1604 |  | 
|  | 1605 | /* | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1606 | * release all the mappings made in a process's VM space | 
| David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1607 | */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1608 | void exit_mmap(struct mm_struct *mm) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1609 | { | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1610 | struct vm_area_struct *vma; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1611 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1612 | if (!mm) | 
|  | 1613 | return; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1614 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1615 | kenter(""); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1616 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1617 | mm->total_vm = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1618 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1619 | while ((vma = mm->mmap)) { | 
|  | 1620 | mm->mmap = vma->vm_next; | 
|  | 1621 | delete_vma_from_mm(vma); | 
|  | 1622 | delete_vma(mm, vma); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1623 | } | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1624 |  | 
|  | 1625 | kleave(""); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1626 | } | 
|  | 1627 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1628 | unsigned long do_brk(unsigned long addr, unsigned long len) | 
|  | 1629 | { | 
|  | 1630 | return -ENOMEM; | 
|  | 1631 | } | 
|  | 1632 |  | 
|  | 1633 | /* | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1634 | * expand (or shrink) an existing mapping, potentially moving it at the same | 
|  | 1635 | * time (controlled by the MREMAP_MAYMOVE flag and available VM space) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1636 | * | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1637 | * under NOMMU conditions, we only permit changing a mapping's size, and only | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1638 | * as long as it stays within the region allocated by do_mmap_private() and the | 
|  | 1639 | * block is not shareable | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1640 | * | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1641 | * MREMAP_FIXED is not supported under NOMMU conditions | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1642 | */ | 
|  | 1643 | unsigned long do_mremap(unsigned long addr, | 
|  | 1644 | unsigned long old_len, unsigned long new_len, | 
|  | 1645 | unsigned long flags, unsigned long new_addr) | 
|  | 1646 | { | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1647 | struct vm_area_struct *vma; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1648 |  | 
|  | 1649 | /* insanity checks first */ | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1650 | if (old_len == 0 || new_len == 0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1651 | return (unsigned long) -EINVAL; | 
|  | 1652 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1653 | if (addr & ~PAGE_MASK) | 
|  | 1654 | return -EINVAL; | 
|  | 1655 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1656 | if (flags & MREMAP_FIXED && new_addr != addr) | 
|  | 1657 | return (unsigned long) -EINVAL; | 
|  | 1658 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1659 | vma = find_vma_exact(current->mm, addr, old_len); | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1660 | if (!vma) | 
|  | 1661 | return (unsigned long) -EINVAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1662 |  | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1663 | if (vma->vm_end != vma->vm_start + old_len) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1664 | return (unsigned long) -EFAULT; | 
|  | 1665 |  | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1666 | if (vma->vm_flags & VM_MAYSHARE) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1667 | return (unsigned long) -EPERM; | 
|  | 1668 |  | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1669 | if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1670 | return (unsigned long) -ENOMEM; | 
|  | 1671 |  | 
|  | 1672 | /* all checks complete - do it */ | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1673 | vma->vm_end = vma->vm_start + new_len; | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1674 | return vma->vm_start; | 
|  | 1675 | } | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 1676 | EXPORT_SYMBOL(do_mremap); | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1677 |  | 
| Heiko Carstens | 6a6160a | 2009-01-14 14:14:15 +0100 | [diff] [blame] | 1678 | SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, | 
|  | 1679 | unsigned long, new_len, unsigned long, flags, | 
|  | 1680 | unsigned long, new_addr) | 
| David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1681 | { | 
|  | 1682 | unsigned long ret; | 
|  | 1683 |  | 
|  | 1684 | down_write(¤t->mm->mmap_sem); | 
|  | 1685 | ret = do_mremap(addr, old_len, new_len, flags, new_addr); | 
|  | 1686 | up_write(¤t->mm->mmap_sem); | 
|  | 1687 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1688 | } | 
|  | 1689 |  | 
| Linus Torvalds | 6aab341 | 2005-11-28 14:34:23 -0800 | [diff] [blame] | 1690 | struct page *follow_page(struct vm_area_struct *vma, unsigned long address, | 
| Hugh Dickins | deceb6c | 2005-10-29 18:16:33 -0700 | [diff] [blame] | 1691 | unsigned int foll_flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1692 | { | 
|  | 1693 | return NULL; | 
|  | 1694 | } | 
|  | 1695 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1696 | int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, | 
|  | 1697 | unsigned long to, unsigned long size, pgprot_t prot) | 
|  | 1698 | { | 
| Greg Ungerer | 66aa2b4 | 2005-09-12 11:18:10 +1000 | [diff] [blame] | 1699 | vma->vm_start = vma->vm_pgoff << PAGE_SHIFT; | 
|  | 1700 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1701 | } | 
| Luke Yang | 22c4af4 | 2006-07-14 00:24:09 -0700 | [diff] [blame] | 1702 | EXPORT_SYMBOL(remap_pfn_range); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1703 |  | 
| Paul Mundt | f905bc4 | 2008-02-04 22:29:59 -0800 | [diff] [blame] | 1704 | int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, | 
|  | 1705 | unsigned long pgoff) | 
|  | 1706 | { | 
|  | 1707 | unsigned int size = vma->vm_end - vma->vm_start; | 
|  | 1708 |  | 
|  | 1709 | if (!(vma->vm_flags & VM_USERMAP)) | 
|  | 1710 | return -EINVAL; | 
|  | 1711 |  | 
|  | 1712 | vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); | 
|  | 1713 | vma->vm_end = vma->vm_start + size; | 
|  | 1714 |  | 
|  | 1715 | return 0; | 
|  | 1716 | } | 
|  | 1717 | EXPORT_SYMBOL(remap_vmalloc_range); | 
|  | 1718 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1719 | void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) | 
|  | 1720 | { | 
|  | 1721 | } | 
|  | 1722 |  | 
|  | 1723 | unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr, | 
|  | 1724 | unsigned long len, unsigned long pgoff, unsigned long flags) | 
|  | 1725 | { | 
|  | 1726 | return -ENOMEM; | 
|  | 1727 | } | 
|  | 1728 |  | 
| Wolfgang Wander | 1363c3c | 2005-06-21 17:14:49 -0700 | [diff] [blame] | 1729 | void arch_unmap_area(struct mm_struct *mm, unsigned long addr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1730 | { | 
|  | 1731 | } | 
|  | 1732 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1733 | void unmap_mapping_range(struct address_space *mapping, | 
|  | 1734 | loff_t const holebegin, loff_t const holelen, | 
|  | 1735 | int even_cows) | 
|  | 1736 | { | 
|  | 1737 | } | 
| Luke Yang | 22c4af4 | 2006-07-14 00:24:09 -0700 | [diff] [blame] | 1738 | EXPORT_SYMBOL(unmap_mapping_range); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1739 |  | 
|  | 1740 | /* | 
| David Howells | d56e03c | 2007-03-22 00:11:23 -0800 | [diff] [blame] | 1741 | * ask for an unmapped area at which to create a mapping on a file | 
|  | 1742 | */ | 
|  | 1743 | unsigned long get_unmapped_area(struct file *file, unsigned long addr, | 
|  | 1744 | unsigned long len, unsigned long pgoff, | 
|  | 1745 | unsigned long flags) | 
|  | 1746 | { | 
|  | 1747 | unsigned long (*get_area)(struct file *, unsigned long, unsigned long, | 
|  | 1748 | unsigned long, unsigned long); | 
|  | 1749 |  | 
|  | 1750 | get_area = current->mm->get_unmapped_area; | 
|  | 1751 | if (file && file->f_op && file->f_op->get_unmapped_area) | 
|  | 1752 | get_area = file->f_op->get_unmapped_area; | 
|  | 1753 |  | 
|  | 1754 | if (!get_area) | 
|  | 1755 | return -ENOSYS; | 
|  | 1756 |  | 
|  | 1757 | return get_area(file, addr, len, pgoff, flags); | 
|  | 1758 | } | 
| David Howells | d56e03c | 2007-03-22 00:11:23 -0800 | [diff] [blame] | 1759 | EXPORT_SYMBOL(get_unmapped_area); | 
|  | 1760 |  | 
|  | 1761 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1762 | * Check that a process has enough memory to allocate a new virtual | 
|  | 1763 | * mapping. 0 means there is enough memory for the allocation to | 
|  | 1764 | * succeed and -ENOMEM implies there is not. | 
|  | 1765 | * | 
|  | 1766 | * We currently support three overcommit policies, which are set via the | 
|  | 1767 | * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting | 
|  | 1768 | * | 
|  | 1769 | * Strict overcommit modes added 2002 Feb 26 by Alan Cox. | 
|  | 1770 | * Additional code 2002 Jul 20 by Robert Love. | 
|  | 1771 | * | 
|  | 1772 | * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise. | 
|  | 1773 | * | 
|  | 1774 | * Note this is a helper function intended to be used by LSMs which | 
|  | 1775 | * wish to use this logic. | 
|  | 1776 | */ | 
| Alan Cox | 34b4e4a | 2007-08-22 14:01:28 -0700 | [diff] [blame] | 1777 | int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1778 | { | 
|  | 1779 | unsigned long free, allowed; | 
|  | 1780 |  | 
|  | 1781 | vm_acct_memory(pages); | 
|  | 1782 |  | 
|  | 1783 | /* | 
|  | 1784 | * Sometimes we want to use more memory than we have | 
|  | 1785 | */ | 
|  | 1786 | if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS) | 
|  | 1787 | return 0; | 
|  | 1788 |  | 
|  | 1789 | if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { | 
|  | 1790 | unsigned long n; | 
|  | 1791 |  | 
| Christoph Lameter | 347ce43 | 2006-06-30 01:55:35 -0700 | [diff] [blame] | 1792 | free = global_page_state(NR_FILE_PAGES); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1793 | free += nr_swap_pages; | 
|  | 1794 |  | 
|  | 1795 | /* | 
|  | 1796 | * Any slabs which are created with the | 
|  | 1797 | * SLAB_RECLAIM_ACCOUNT flag claim to have contents | 
|  | 1798 | * which are reclaimable, under pressure.  The dentry | 
|  | 1799 | * cache and most inode caches should fall into this | 
|  | 1800 | */ | 
| Christoph Lameter | 972d1a7 | 2006-09-25 23:31:51 -0700 | [diff] [blame] | 1801 | free += global_page_state(NR_SLAB_RECLAIMABLE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1802 |  | 
|  | 1803 | /* | 
|  | 1804 | * Leave the last 3% for root | 
|  | 1805 | */ | 
|  | 1806 | if (!cap_sys_admin) | 
|  | 1807 | free -= free / 32; | 
|  | 1808 |  | 
|  | 1809 | if (free > pages) | 
|  | 1810 | return 0; | 
|  | 1811 |  | 
|  | 1812 | /* | 
|  | 1813 | * nr_free_pages() is very expensive on large systems, | 
|  | 1814 | * only call if we're about to fail. | 
|  | 1815 | */ | 
|  | 1816 | n = nr_free_pages(); | 
| Hideo AOKI | d5ddc79 | 2006-04-10 22:53:01 -0700 | [diff] [blame] | 1817 |  | 
|  | 1818 | /* | 
|  | 1819 | * Leave reserved pages. The pages are not for anonymous pages. | 
|  | 1820 | */ | 
|  | 1821 | if (n <= totalreserve_pages) | 
|  | 1822 | goto error; | 
|  | 1823 | else | 
|  | 1824 | n -= totalreserve_pages; | 
|  | 1825 |  | 
|  | 1826 | /* | 
|  | 1827 | * Leave the last 3% for root | 
|  | 1828 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1829 | if (!cap_sys_admin) | 
|  | 1830 | n -= n / 32; | 
|  | 1831 | free += n; | 
|  | 1832 |  | 
|  | 1833 | if (free > pages) | 
|  | 1834 | return 0; | 
| Hideo AOKI | d5ddc79 | 2006-04-10 22:53:01 -0700 | [diff] [blame] | 1835 |  | 
|  | 1836 | goto error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1837 | } | 
|  | 1838 |  | 
|  | 1839 | allowed = totalram_pages * sysctl_overcommit_ratio / 100; | 
|  | 1840 | /* | 
|  | 1841 | * Leave the last 3% for root | 
|  | 1842 | */ | 
|  | 1843 | if (!cap_sys_admin) | 
|  | 1844 | allowed -= allowed / 32; | 
|  | 1845 | allowed += total_swap_pages; | 
|  | 1846 |  | 
|  | 1847 | /* Don't let a single process grow too big: | 
|  | 1848 | leave 3% of the size of this process for other processes */ | 
| Alan Cox | 731572d | 2008-10-29 14:01:20 -0700 | [diff] [blame] | 1849 | if (mm) | 
|  | 1850 | allowed -= mm->total_vm / 32; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1851 |  | 
| Simon Derr | 2f60f8d | 2005-08-04 19:52:03 -0700 | [diff] [blame] | 1852 | /* | 
|  | 1853 | * cast `allowed' as a signed long because vm_committed_space | 
|  | 1854 | * sometimes has a negative value | 
|  | 1855 | */ | 
| Alan Cox | 80119ef | 2008-05-23 13:04:31 -0700 | [diff] [blame] | 1856 | if (atomic_long_read(&vm_committed_space) < (long)allowed) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1857 | return 0; | 
| Hideo AOKI | d5ddc79 | 2006-04-10 22:53:01 -0700 | [diff] [blame] | 1858 | error: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1859 | vm_unacct_memory(pages); | 
|  | 1860 |  | 
|  | 1861 | return -ENOMEM; | 
|  | 1862 | } | 
|  | 1863 |  | 
|  | 1864 | int in_gate_area_no_task(unsigned long addr) | 
|  | 1865 | { | 
|  | 1866 | return 0; | 
|  | 1867 | } | 
| David Howells | b0e1519 | 2006-01-06 00:11:42 -0800 | [diff] [blame] | 1868 |  | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1869 | int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 
| David Howells | b0e1519 | 2006-01-06 00:11:42 -0800 | [diff] [blame] | 1870 | { | 
|  | 1871 | BUG(); | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1872 | return 0; | 
| David Howells | b0e1519 | 2006-01-06 00:11:42 -0800 | [diff] [blame] | 1873 | } | 
| Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 1874 | EXPORT_SYMBOL(filemap_fault); | 
| David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1875 |  | 
|  | 1876 | /* | 
|  | 1877 | * Access another process' address space. | 
|  | 1878 | * - source/target buffer must be kernel space | 
|  | 1879 | */ | 
|  | 1880 | int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) | 
|  | 1881 | { | 
| David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1882 | struct vm_area_struct *vma; | 
|  | 1883 | struct mm_struct *mm; | 
|  | 1884 |  | 
|  | 1885 | if (addr + len < addr) | 
|  | 1886 | return 0; | 
|  | 1887 |  | 
|  | 1888 | mm = get_task_mm(tsk); | 
|  | 1889 | if (!mm) | 
|  | 1890 | return 0; | 
|  | 1891 |  | 
|  | 1892 | down_read(&mm->mmap_sem); | 
|  | 1893 |  | 
|  | 1894 | /* the access must start within one of the target process's mappings */ | 
| David Howells | 0159b14 | 2006-09-27 01:50:16 -0700 | [diff] [blame] | 1895 | vma = find_vma(mm, addr); | 
|  | 1896 | if (vma) { | 
| David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1897 | /* don't overrun this mapping */ | 
|  | 1898 | if (addr + len >= vma->vm_end) | 
|  | 1899 | len = vma->vm_end - addr; | 
|  | 1900 |  | 
|  | 1901 | /* only read or write mappings where it is permitted */ | 
| David Howells | d00c7b9 | 2006-09-27 01:50:19 -0700 | [diff] [blame] | 1902 | if (write && vma->vm_flags & VM_MAYWRITE) | 
| David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1903 | len -= copy_to_user((void *) addr, buf, len); | 
| David Howells | d00c7b9 | 2006-09-27 01:50:19 -0700 | [diff] [blame] | 1904 | else if (!write && vma->vm_flags & VM_MAYREAD) | 
| David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1905 | len -= copy_from_user(buf, (void *) addr, len); | 
|  | 1906 | else | 
|  | 1907 | len = 0; | 
|  | 1908 | } else { | 
|  | 1909 | len = 0; | 
|  | 1910 | } | 
|  | 1911 |  | 
|  | 1912 | up_read(&mm->mmap_sem); | 
|  | 1913 | mmput(mm); | 
|  | 1914 | return len; | 
|  | 1915 | } |