/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"

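/*
 * Return 1 if the calling task is allowed to mlock pages: either it has
 * CAP_IPC_LOCK, or its RLIMIT_MEMLOCK limit is non-zero (how many bytes
 * it may actually lock is checked later by the callers).
 */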
int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 *  LRU accounting for clear_page_mlock()
 */
void __clear_page_mlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!page->mapping) {	/* truncated ? */
		return;
	}

	dec_zone_page_state(page, NR_MLOCK);
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race. The page already moved to the
		 * evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

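	/*
	 * TestSetPageMlocked() is atomic, so only the task that flips the
	 * flag from clear to set does the NR_MLOCK accounting and the LRU
	 * round trip below; concurrent callers see the flag already set
	 * and do nothing.
	 */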
	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/**
 * munlock_vma_page - munlock a vma page
 * @page: page to be unlocked
 *
 * Called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

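	/*
	 * As in mlock_vma_page(), the atomic TestClearPageMlocked() ensures
	 * that only one task adjusts NR_MLOCK and attempts the LRU
	 * isolation for a given page.
	 */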
	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			int ret = try_to_munlock(page);
			/*
			 * did try_to_munlock() succeed or punt?
			 */
			if (ret != SWAP_MLOCK)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);

			putback_lru_page(page);
		} else {
			/*
			 * Some other task has removed the page from the LRU.
			 * putback_lru_page() will take care of removing the
			 * page from the unevictable list, if necessary.
			 * vmscan [page_referenced()] will move the page back
			 * to the unevictable list if some other vma has it
			 * mlocked.
			 */
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
			else
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
		}
	}
}

/**
 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 *
 * This takes care of making the pages present too.
 *
 * Returns the number of pages faulted in on success, or a negative
 * error code on failure (the raw __get_user_pages() return value).
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end,
				    int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	int nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end   > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * Convert a get_user_pages() return value to a POSIX mlock() error:
 * -EFAULT means part of the range was not a valid mapping, which POSIX
 * mlock() reports as ENOMEM; -ENOMEM from get_user_pages() means we ran
 * out of memory while faulting pages in, which mlock() reports as
 * EAGAIN.
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

/**
 * mlock_vma_pages_range() - mlock pages in specified vma range.
 * @vma:   the vma containing the specified address range
 * @start: starting address in @vma to mlock
 * @end:   end address [+1] in @vma to mlock
 *
 * For mmap()/mremap()/expansion of mlocked vma.
 *
 * Returns 0 on success for "normal" vmas.
 *
 * Returns the number of pages [> 0] to be removed from locked_vm on
 * success of "special" vmas.
 */
long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	int nr_pages = (end - start) / PAGE_SIZE;
	BUG_ON(!(vma->vm_flags & VM_LOCKED));

	/*
	 * filter unlockable vmas
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		goto no_mlock;

	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current->mm))) {

		__mlock_vma_pages_range(vma, start, end, NULL);

		/* Hide errors from mmap() and other callers */
		return 0;
	}

	/*
	 * User mapped kernel pages or huge pages:
	 * make these pages present to populate the ptes, but
	 * fall thru' to reset VM_LOCKED--no need to unlock, and
	 * return nr_pages so these don't get counted against task's
	 * locked limit.  huge pages are already counted against
	 * locked vm limit.
	 */
	make_pages_present(start, end);

no_mlock:
	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
	return nr_pages;		/* error or pages NOT mlocked */
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma:   vma containing range to be munlock()ed.
 * @start: start address in @vma of the range
 * @end:   end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;

	lru_add_drain();
	vma->vm_flags &= ~VM_LOCKED;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page;
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
		if (page && !IS_ERR(page)) {
			lock_page(page);
			/*
			 * Like in __mlock_vma_pages_range(),
			 * because we lock page here and migration is
			 * blocked by the elevated reference, we need
			 * only check for file-cache page truncation.
			 */
			if (page->mapping)
				munlock_vma_page(page);
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
}

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = newflags & VM_LOCKED;

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
		goto out;	/* don't set VM_LOCKED, don't count */

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

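/*
 * Walk the vmas covering [start, start + len) and apply mlock_fixup()
 * to each in turn, clamping the affected range to each vma and relying
 * on mlock_fixup() to split or merge vmas as needed.  The caller must
 * hold mmap_sem for write.  Returns 0 or the first error encountered.
 */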
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

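/*
 * Fault in and mlock pages over [start, start + len), one vma at a
 * time, holding mmap_sem for read.  __mlock_vma_pages_range() may drop
 * mmap_sem while it sleeps (it passes "locked" down as the gup
 * nonblocking flag), in which case we take it again and look the vma
 * up afresh on the next iteration.
 */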
static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	int ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. __mlock_vma_pages_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			ret = __mlock_posix_error_return(ret);
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	if (!error)
		error = do_mlock_pages(start, len, 0);
	return error;
}
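
/*
 * Illustrative userspace sketch (not part of this file): pinning a
 * buffer so it cannot be paged out, assuming RLIMIT_MEMLOCK allows it:
 *
 *	void *buf;
 *	if (posix_memalign(&buf, sysconf(_SC_PAGESIZE), len) == 0 &&
 *	    mlock(buf, len) == 0) {
 *		... use buf; it stays resident ...
 *		munlock(buf, len);
 *	}
 *
 * Page alignment is not required by mlock() itself; as the
 * PAGE_ALIGN()/PAGE_MASK arithmetic above shows, the kernel rounds the
 * range out to page boundaries.
 */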

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

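/*
 * Apply or clear VM_LOCKED on every vma in the current mm and, for
 * MCL_FUTURE, set mm->def_flags so that future mappings start out
 * locked.  Called with mmap_sem held for write.  Per-vma errors from
 * mlock_fixup() are deliberately ignored: mlockall() is best-effort
 * across the address space.
 */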
static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
	if (!ret && (flags & MCL_CURRENT)) {
		/* Ignore errors */
		do_mlock_pages(0, TASK_SIZE, 1);
	}
out:
	return ret;
}
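
/*
 * Illustrative usage note (not part of this file): a latency-sensitive
 * or real-time process typically calls
 *
 *	mlockall(MCL_CURRENT | MCL_FUTURE);
 *
 * early in startup, so that everything already mapped is faulted in and
 * locked (MCL_CURRENT, via do_mlock_pages() above), and later mappings
 * inherit VM_LOCKED through mm->def_flags (MCL_FUTURE).
 */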

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with a different lifetime than processes (SHM_LOCK and
 * SHM_HUGETLB shm segments) get accounted against the user_struct
 * instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

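/*
 * Charge "size" bytes (rounded up to whole pages) against
 * user->locked_shm.  Returns 1 if the charge is allowed: either the
 * RLIMIT_MEMLOCK limit is RLIM_INFINITY, or the new total stays within
 * it, or the caller has CAP_IPC_LOCK.  Returns 0 otherwise.  On success
 * a reference to @user is taken; user_shm_unlock() drops it.
 */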
int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}