/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"

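/*
 * can_do_mlock() - may the caller lock memory?  Yes if it has
 * CAP_IPC_LOCK, or if its RLIMIT_MEMLOCK soft limit is nonzero.
 */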
int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 * LRU accounting for clear_page_mlock()
 */
void __clear_page_mlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!page->mapping) {	/* truncated ? */
		return;
	}

	dec_zone_page_state(page, NR_MLOCK);
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race; the page has already moved to the evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.
 * If the page is on the LRU, isolate it and put it back,
 * so that it moves to the unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/*
 * Called from the munlock()/munmap() path with the page supposedly on the LRU.
 *
 * Note:  unlike mlock_vma_page(), we can't just clear the PageMlocked
 * [in try_to_munlock()] and then attempt to isolate the page.  We must
 * isolate the page to keep others from messing with its unevictable
 * and mlocked state while trying to munlock.  However, we pre-clear the
 * mlocked state anyway as we might lose the isolation race and we might
 * not get another chance to clear PageMlocked.  If we successfully
 * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
 * mapping the page, it will restore the PageMlocked state, unless the page
 * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
 * perhaps redundantly.
 * If we lose the isolation race, and the page is mapped by other VM_LOCKED
 * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap(),
 * either of which will restore the PageMlocked state by calling
 * mlock_vma_page() above, if it can grab the vma's mmap_sem.
 */
static void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			int ret = try_to_munlock(page);
			/*
			 * did try_to_munlock() succeed or punt?
			 */
			if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);

			putback_lru_page(page);
		} else {
			/*
			 * We lost the race.  Let try_to_unmap() deal
			 * with it.  At least we get the page state and
			 * mlock stats right.  However, the page is still
			 * on the unevictable list.  We'll fix that up
			 * when the page is eventually freed or we scan
			 * the unevictable list.
			 */
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
			else
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
		}
	}
}

/**
 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 *
 * This takes care of making the pages present too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	struct page *pages[16]; /* 16 gives a reasonable batch */
	int nr_pages = (end - start) / PAGE_SIZE;
	int ret = 0;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end   > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	gup_flags = FOLL_TOUCH | FOLL_GET;
	if (vma->vm_flags & VM_WRITE)
		gup_flags |= FOLL_WRITE;

	while (nr_pages > 0) {
		int i;

		cond_resched();

		/*
		 * get_user_pages() makes pages present if we are
		 * setting mlock, and this extra reference count will
		 * disable migration of this page.  However, the page
		 * may still be truncated out from under us.
		 */
		ret = __get_user_pages(current, mm, addr,
				min_t(int, nr_pages, ARRAY_SIZE(pages)),
				gup_flags, pages, NULL);
		/*
		 * This can happen for, e.g., VM_NONLINEAR regions before
		 * a page has been allocated and mapped at a given offset,
		 * or for addresses that map beyond end of a file.
		 * We'll mlock the pages if/when they get faulted in.
		 */
		if (ret < 0)
			break;

		lru_add_drain();	/* push cached pages to LRU */

		for (i = 0; i < ret; i++) {
			struct page *page = pages[i];

			if (page->mapping) {
				/*
				 * That preliminary check is mainly to avoid
				 * the pointless overhead of lock_page on the
				 * ZERO_PAGE: which might bounce very badly if
				 * there is contention.  However, we're still
				 * dirtying its cacheline with get/put_page:
				 * we'll add another __get_user_pages flag to
				 * avoid it if that case turns out to matter.
				 */
				lock_page(page);
				/*
				 * Because we lock page here and migration is
				 * blocked by the elevated reference, we need
				 * only check for file-cache page truncation.
				 */
				if (page->mapping)
					mlock_vma_page(page);
				unlock_page(page);
			}
			put_page(page);	/* ref from get_user_pages() */
		}

		addr += ret * PAGE_SIZE;
		nr_pages -= ret;
		ret = 0;
	}

	return ret;	/* 0 or negative error code */
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

/**
 * mlock_vma_pages_range() - mlock pages in specified vma range.
 * @vma:   vma containing the specified address range
 * @start: starting address in @vma to mlock
 * @end:   end address [+1] in @vma to mlock
 *
 * For mmap()/mremap()/expansion of mlocked vma.
 *
 * return 0 on success for "normal" vmas.
 *
 * return number of pages [> 0] to be removed from locked_vm on success
 * of "special" vmas.
 */
long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	int nr_pages = (end - start) / PAGE_SIZE;
	BUG_ON(!(vma->vm_flags & VM_LOCKED));

	/*
	 * filter unlockable vmas
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		goto no_mlock;

	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current))) {

		__mlock_vma_pages_range(vma, start, end);

		/* Hide errors from mmap() and other callers */
		return 0;
	}

	/*
	 * User mapped kernel pages or huge pages:
	 * make these pages present to populate the ptes, but
	 * fall through to reset VM_LOCKED--no need to unlock, and
	 * return nr_pages so these don't get counted against the task's
	 * locked limit.  Huge pages are already counted against
	 * the locked vm limit.
	 */
	make_pages_present(start, end);

no_mlock:
	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
	return nr_pages;		/* error or pages NOT mlocked */
}

/**
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma:   vma containing range to be munlock()ed.
 * @start: start address in @vma of the range
 * @end:   end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;

	lru_add_drain();
	vma->vm_flags &= ~VM_LOCKED;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page;
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
		if (page && !IS_ERR(page)) {
			lock_page(page);
			/*
			 * Like in __mlock_vma_pages_range(),
			 * because we lock page here and migration is
			 * blocked by the elevated reference, we need
			 * only check for file-cache page truncation.
			 */
			if (page->mapping)
				munlock_vma_page(page);
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
}

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = newflags & VM_LOCKED;

	if (newflags == vma->vm_flags ||
			(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;	/* don't set VM_LOCKED, don't count */

	if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current)) {
		if (lock)
			make_pages_present(start, end);
		goto out;	/* don't set VM_LOCKED, don't count */
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one() unmaps a page just after we
	 * set VM_LOCKED; __mlock_vma_pages_range() will bring it back.
	 */

	if (lock) {
		vma->vm_flags = newflags;
		ret = __mlock_vma_pages_range(vma, start, end);
		if (ret < 0)
			ret = __mlock_posix_error_return(ret);
	} else {
		munlock_vma_pages_range(vma, start, end);
	}

out:
	*prev = vma;
	return ret;
}

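/*
 * do_mlock() - apply (@on != 0) or clear VM_LOCKED over the page-aligned
 * range [start, start + len), walking the vmas that cover it and calling
 * mlock_fixup() on each piece.  Caller must hold mmap_sem for write.
 * Returns 0 on success, -EINVAL if the range wraps, or -ENOMEM if the
 * range is not fully mapped.
 */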
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

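/*
 * Illustrative userspace usage of the two syscalls above (a sketch, not
 * part of the kernel build):
 *
 *	char buf[4096];
 *	if (mlock(buf, sizeof(buf)) == 0) {
 *		... buf is now resident and exempt from reclaim ...
 *		munlock(buf, sizeof(buf));
 *	}
 *
 * Both calls round the range out to whole pages, as done above.
 */

/*
 * do_mlockall() - set or clear VM_LOCKED on every vma of the current mm.
 * MCL_FUTURE is recorded in mm->def_flags so that future mappings are
 * locked too; MCL_CURRENT walks the existing vmas via mlock_fixup().
 */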
static int do_mlockall(int flags)
{
	struct vm_area_struct *vma, *prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with different lifetimes than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}

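/*
 * account_locked_memory() - charge @size bytes, rounded up to whole pages,
 * to @mm's total_vm and locked_vm, after checking them against the
 * RLIMIT_AS and RLIMIT_MEMLOCK limits in @rlim.  Returns 0 on success,
 * -ENOMEM if either limit would be exceeded.
 */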
int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
			  size_t size)
{
	unsigned long lim, vm, pgsz;
	int error = -ENOMEM;

	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&mm->mmap_sem);

	lim = rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
	vm   = mm->total_vm + pgsz;
	if (lim < vm)
		goto out;

	lim = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
	vm   = mm->locked_vm + pgsz;
	if (lim < vm)
		goto out;

	mm->total_vm  += pgsz;
	mm->locked_vm += pgsz;

	error = 0;
out:
	up_write(&mm->mmap_sem);
	return error;
}

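/*
 * refund_locked_memory() - undo an account_locked_memory() charge.
 */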
void refund_locked_memory(struct mm_struct *mm, size_t size)
{
	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&mm->mmap_sem);

	mm->total_vm  -= pgsz;
	mm->locked_vm -= pgsz;

	up_write(&mm->mmap_sem);
}