/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */

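/*
 * Illustrative sketch (not part of the original file): the walkers
 * below take these locks strictly top-down.  page_referenced_file,
 * for example, takes mapping->i_mmap_lock and then lets
 * page_check_address take the pte lock one level further down:
 *
 *	spin_lock(&mapping->i_mmap_lock);
 *	pte = page_check_address(page, mm, address, &ptl);
 *	if (pte)
 *		pte_unmap_unlock(pte, ptl);
 *	spin_unlock(&mapping->i_mmap_lock);
 *
 * Acquiring them in the reverse order would risk deadlock.
 */
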
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/tlbflush.h>

struct kmem_cache *anon_vma_cachep;

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}

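/*
 * Illustrative usage sketch (an assumption about callers, not code from
 * this file): a fault path prepares the anon_vma before it maps a brand
 * new anonymous page, roughly:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = ... allocate and zero a user page ...;
 *	set_pte_at(mm, address, pte, mk_pte(page, vma->vm_page_prot));
 *	page_add_new_anon_rmap(page, vma, address);
 */
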
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma)
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(struct kmem_cache *cachep, void *data)
{
	struct anon_vma *anon_vma = data;

	spin_lock_init(&anon_vma->lock);
	INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

static void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->lock);
	rcu_read_unlock();
}

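/*
 * Usage pattern (mirroring page_referenced_anon and try_to_unmap_anon
 * below): the two helpers must be paired, so that the RCU read lock
 * taken in page_lock_anon_vma is always dropped:
 *
 *	anon_vma = page_lock_anon_vma(page);
 *	if (!anon_vma)
 *		return ret;
 *	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
 *		... visit each vma mapping the page ...
 *	page_unlock_anon_vma(anon_vma);
 */
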
/*
 * At what user virtual address is page expected in @vma?
 * Returns virtual address or -EFAULT if page's index/offset is not
 * within the range mapped by the @vma.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within @vma mapping range */
		return -EFAULT;
	}
	return address;
}

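/*
 * Worked example (illustrative numbers, not from the original source):
 * with 4K pages, a vma with vm_pgoff = 0x10 and vm_start = 0x08048000,
 * and a page with page->index = 0x12, gives
 *
 *	address = 0x08048000 + ((0x12 - 0x10) << 12) = 0x0804a000
 *
 * which is accepted provided it falls inside [vm_start, vm_end);
 * otherwise vma_address returns -EFAULT.
 */
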
/*
 * At what user virtual address is page expected in vma? Checking that
 * the page matches the vma: currently only used on anon pages, by
 * unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

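/*
 * Caller pattern (as used by page_referenced_one, page_mkclean_one and
 * try_to_unmap_one below): on success the pte comes back mapped and
 * locked, so every caller owes the matching unlock:
 *
 *	pte = page_check_address(page, mm, address, &ptl);
 *	if (!pte)
 *		goto out;
 *	... inspect or modify *pte ...
 *	pte_unmap_unlock(pte, ptl);
 */
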
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (vma->vm_flags & VM_LOCKED) {
		referenced++;
		*mapcount = 1;	/* break early from loop */
	} else if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/*
	 * Pretend the page is referenced if the task has the
	 * swap token and is in the middle of a page fault.
	 */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
	int referenced = 0;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else if (is_locked)
			referenced += page_referenced_file(page);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page);
			unlock_page(page);
		}
	}
	return referenced;
}

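/*
 * Usage sketch (an assumption about the caller, not code from this
 * file): the reclaim scanner in mm/vmscan.c consults page_referenced
 * to decide whether a page has been used recently enough to keep,
 * roughly:
 *
 *	if (page_referenced(page, 1))
 *		goto activate_locked;	... keep the page active ...
 */
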
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_dirty(page)) {
				page_clear_dirty(page);
				ret = 1;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);

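/*
 * Usage sketch (an assumption about callers, not code from this file):
 * write-protecting every pte via page_mkclean means the next store
 * faults and re-dirties the page, which is how the writeback path can
 * keep pte dirty state and page dirty state in sync before starting
 * I/O, roughly:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */
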
/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	struct anon_vma *anon_vma = vma->anon_vma;
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	BUG_ON(page->mapping != (struct address_space *)anon_vma);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock and the page must be locked.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}

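/*
 * Worked sequence (illustrative, assumed from typical fault handling,
 * not code from this file): because _mapcount starts at -1,
 * atomic_inc_and_test() returns true only for the very first mapping,
 * which is exactly when the anon rmap fields get set:
 *
 *	mapcount -1 -> 0:  first pte, __page_set_anon_rmap() runs
 *	mapcount  0 -> 1:  second pte (e.g. after fork), only the
 *			   debug check __page_check_anon_rmap() runs
 */
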
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
}

#ifdef CONFIG_DEBUG_VM
/**
 * page_dup_rmap - duplicate pte mapping to a page
 * @page:	the page to add the mapping to
 *
 * For copy_page_range only: minimal extract from page_add_file_rmap /
 * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
 * quicker.
 *
 * The caller needs to hold the pte lock.
 */
void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(page_mapcount(page) == 0);
	if (PageAnon(page))
		__page_check_anon_rmap(page, vma, address);
	atomic_inc(&page->_mapcount);
}
#endif

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		if (unlikely(page_mapcount(page) < 0)) {
			printk(KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk(KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
			printk(KERN_EMERG "  page->flags = %lx\n", page->flags);
			printk(KERN_EMERG "  page->count = %x\n", page_count(page));
			printk(KERN_EMERG "  page->mapping = %p\n", page->mapping);
			print_symbol(KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
			if (vma->vm_ops) {
				print_symbol(KERN_EMERG "  vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
				print_symbol(KERN_EMERG "  vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault);
			}
			if (vma->vm_file && vma->vm_file->f_op)
				print_symbol(KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
			BUG();
		}

		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_dirty(page)) {
			page_clear_dirty(page);
			set_page_dirty(page);
		}
		__dec_zone_page_state(page,
				PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
	}
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
			(ptep_clear_flush_young(vma, address, pte)))) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
		} else {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
#endif
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else
#ifdef CONFIG_MIGRATION
	if (migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
#endif
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page, vma);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.   Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

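/*
 * Worked example (illustrative, assuming 4K pages): CLUSTER_SIZE is
 * then 32 * 4K = 128K = 0x20000, so CLUSTER_MASK = ~0x1ffff and a
 * cursor of, say, 0x23456 gets rounded down to the 0x20000 cluster
 * boundary before the pte scan starts.
 */
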
static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page, vma);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}

static int try_to_unmap_anon(struct page *page, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if ((vma->vm_flags & VM_LOCKED) && !migration)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if ((vma->vm_flags & VM_LOCKED) && !migration)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, migration);
	else
		ret = try_to_unmap_file(page, migration);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}

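/*
 * Usage sketch (an assumption about the caller, not code from this
 * file): the pageout path in mm/vmscan.c tries to strip every mapping
 * before writing a page out, roughly:
 *
 *	switch (try_to_unmap(page, 0)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_SUCCESS:
 *		;	... fall through and try to free the page ...
 *	}
 */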