/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/memcontrol.h>

#include <asm/tlbflush.h>

struct kmem_cache *anon_vma_cachep;

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
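
/*
 * Usage sketch (not code from this file): fault handlers are expected
 * to call anon_vma_prepare() before installing a new anonymous pte,
 * roughly:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	...
 *	page_add_new_anon_rmap(page, vma, address);
 *
 * do_anonymous_page() in mm/memory.c follows this pattern.
 */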

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma)
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(struct kmem_cache *cachep, void *data)
{
	struct anon_vma *anon_vma = data;

	spin_lock_init(&anon_vma->lock);
	INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

static void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->lock);
	rcu_read_unlock();
}
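
/*
 * The walkers below pair these as follows (a sketch of the pattern;
 * page_referenced_anon and try_to_unmap_anon are concrete instances):
 *
 *	anon_vma = page_lock_anon_vma(page);
 *	if (!anon_vma)
 *		return ...;
 *	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
 *		...;
 *	page_unlock_anon_vma(anon_vma);
 */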

/*
 * At what user virtual address is page expected in @vma?
 * Returns virtual address or -EFAULT if page's index/offset is not
 * within the range mapped by @vma.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within @vma mapping range */
		return -EFAULT;
	}
	return address;
}

/*
 * At what user virtual address is page expected in vma? Checks that the
 * page matches the vma: currently only used on anon pages, by unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
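
/*
 * Typical caller pattern (a sketch; page_referenced_one below is a
 * concrete instance):
 *
 *	pte = page_check_address(page, mm, address, &ptl);
 *	if (!pte)
 *		goto out;
 *	... inspect or modify *pte under the pte lock ...
 *	pte_unmap_unlock(pte, ptl);
 */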

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (vma->vm_flags & VM_LOCKED) {
		referenced++;
		*mapcount = 1;	/* break early from loop */
	} else if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/*
	 * Pretend the page is referenced if the task has the
	 * swap token and is in the middle of a page fault.
	 */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page,
				struct mem_cgroup *mem_cont)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked,
			struct mem_cgroup *mem_cont)
{
	int referenced = 0;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont);
		else if (is_locked)
			referenced += page_referenced_file(page, mem_cont);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced +=
					page_referenced_file(page, mem_cont);
			unlock_page(page);
		}
	}

	if (page_test_and_clear_young(page))
		referenced++;

	return referenced;
}
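
/*
 * Example caller (a sketch for orientation, not code from this file):
 * the LRU scanner uses the returned count to decide whether a mapped
 * page has been used recently, roughly:
 *
 *	if (page_referenced(page, 0, mem_cont))
 *		... treat the page as recently used ...
 *
 * See shrink_active_list() in mm/vmscan.c for the real policy.
 */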

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_dirty(page)) {
				page_clear_dirty(page);
				ret = 1;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);
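
/*
 * Note on intent (a sketch, not code from this file): page_mkclean()
 * write-protects all ptes mapping a shared file page, so the next
 * write takes a fault and the filesystem can notice redirtying via
 * page_mkwrite.  clear_page_dirty_for_io() in mm/page-writeback.c is
 * the main caller.
 */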

/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * The zone's anon-pages counter can be updated without turning
	 * off interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	struct anon_vma *anon_vma = vma->anon_vma;
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	BUG_ON(page->mapping != (struct address_space *)anon_vma);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock and the page must be locked.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	else {
		__page_check_anon_rmap(page, vma, address);
		/*
		 * We unconditionally charged during prepare, we uncharge here
		 * This takes care of balancing the reference counts
		 */
		mem_cgroup_uncharge_page(page);
	}
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
	else
		/*
		 * We unconditionally charged during prepare, we uncharge here
		 * This takes care of balancing the reference counts
		 */
		mem_cgroup_uncharge_page(page);
}

#ifdef CONFIG_DEBUG_VM
/**
 * page_dup_rmap - duplicate pte mapping to a page
 * @page:	the page to add the mapping to
 * @vma:	the vm area being duplicated
 * @address:	the user virtual address mapped
 *
 * For copy_page_range only: minimal extract from page_add_file_rmap /
 * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
 * quicker.
 *
 * The caller needs to hold the pte lock.
 */
void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(page_mapcount(page) == 0);
	if (PageAnon(page))
		__page_check_anon_rmap(page, vma, address);
	atomic_inc(&page->_mapcount);
}
#endif

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @vma: the vm area in which the mapping is removed
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		if (unlikely(page_mapcount(page) < 0)) {
			printk(KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk(KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
			printk(KERN_EMERG "  page->flags = %lx\n", page->flags);
			printk(KERN_EMERG "  page->count = %x\n", page_count(page));
			printk(KERN_EMERG "  page->mapping = %p\n", page->mapping);
			print_symbol(KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
			if (vma->vm_ops) {
				print_symbol(KERN_EMERG "  vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault);
			}
			if (vma->vm_file && vma->vm_file->f_op)
				print_symbol(KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
			BUG();
		}

		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_dirty(page)) {
			page_clear_dirty(page);
			set_page_dirty(page);
		}
		mem_cgroup_uncharge_page(page);

		__dec_zone_page_state(page,
				PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
	}
}
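
/*
 * Caller sketch (for orientation; not code from this file): pte
 * teardown paths such as zap_pte_range() in mm/memory.c do roughly
 *
 *	pteval = ptep_get_and_clear_full(mm, addr, pte, ...);
 *	page_remove_rmap(page, vma);
 *	... release the page ...
 *
 * with the pte lock held, as required above.
 */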

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
			(ptep_clear_flush_young(vma, address, pte)))) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
		} else {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
#endif
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else
#ifdef CONFIG_MIGRATION
	if (migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
#endif
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page, vma);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
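
/*
 * Worked example (assuming 4K pages, a common configuration):
 * 32 * PAGE_SIZE = 128K, which is below a typical PMD_SIZE of 2M or
 * 4M, so CLUSTER_SIZE is 128K and each call scans up to 32 ptes.
 * The min() against PMD_SIZE keeps a cluster within a single pte
 * page on configurations with larger pages.
 */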

static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page, vma);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}

static int try_to_unmap_anon(struct page *page, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 * @migration: migration flag
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if ((vma->vm_flags & VM_LOCKED) && !migration)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if ((vma->vm_flags & VM_LOCKED) && !migration)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @migration: migration flag
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, migration);
	else
		ret = try_to_unmap_file(page, migration);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
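
/*
 * Example caller (a sketch for orientation; the real logic lives in
 * shrink_page_list() in mm/vmscan.c): the pageout path dispatches on
 * the return value roughly as
 *
 *	switch (try_to_unmap(page, 0)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_SUCCESS:
 *		;	(fall through and try to free the page)
 *	}
 */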