/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_sem	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem
 *
 * When a page fault occurs in writing from user to file, down_read
 * of mmap_sem nests within i_sem; in sys_msync, i_sem nests within
 * down_read of mmap_sem; i_sem and down_write of mmap_sem are never
 * taken together; in truncation, i_sem is taken outermost.
 *
 * mm->mmap_sem
 *   page->flags PG_locked (lock_page)
 *     mapping->i_mmap_lock
 *       anon_vma->lock
 *         mm->page_table_lock
 *           zone->lru_lock (in mark_page_accessed)
 *           swap_list_lock (in swap_free etc's swap_info_get)
 *             mmlist_lock (in mmput, drain_mmlist and others)
 *             swap_device_lock (in swap_duplicate, swap_info_get)
 *             mapping->private_lock (in __set_page_dirty_buffers)
 *             inode_lock (in set_page_dirty's __mark_inode_dirty)
 *               sb_lock (within inode_lock in fs/fs-writeback.c)
 *               mapping->tree_lock (widely used, in set_page_dirty,
 *                         in arch-dependent flush_dcache_mmap_lock,
 *                         within inode_lock in __sync_single_inode)
 */
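
/*
 * A concrete sketch of the nesting above (illustrative only, not
 * exhaustive), as seen when unmapping a file page - the caller holds
 * the page lock, see try_to_unmap and its helpers below:
 *
 *	lock_page(page)				page->flags PG_locked
 *	  spin_lock(&mapping->i_mmap_lock)	in try_to_unmap_file
 *	    spin_lock(&mm->page_table_lock)	in page_check_address
 *	    spin_unlock(&mm->page_table_lock)
 *	  spin_unlock(&mapping->i_mmap_lock)
 *	unlock_page(page)
 */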

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>

#include <asm/tlbflush.h>

//#define RMAP_DEBUG /* can be enabled only for debugging */

kmem_cache_t *anon_vma_cachep;

static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef RMAP_DEBUG
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
						SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 * The anon_vma cache is SLAB_DESTROY_BY_RCU (see anon_vma_init above),
 * so while we hold rcu_read_lock the structure's memory cannot be freed
 * back to the page allocator and its spinlock remains valid to take.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
out:
	rcu_read_unlock();
	return anon_vma;
}

/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}
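
/*
 * Worked example for vma_address (made-up numbers, assuming 4K pages and
 * PAGE_CACHE_SIZE == PAGE_SIZE): a page with index 0x30, in a vma with
 * vm_start 0x08048000 and vm_pgoff 0x10, is expected at
 * 0x08048000 + ((0x30 - 0x10) << 12) = 0x08068000 - provided that address
 * lies within [vm_start, vm_end), otherwise -EFAULT is returned.
 */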

/*
 * At what user virtual address is page expected in vma? Checking that the
 * page matches the vma: currently only used by unuse_process, on anon pages.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with the mapped pte and mm->page_table_lock held.
 * On failure returns ERR_PTR(-ENOENT), with the page_table_lock dropped.
 */
static pte_t *page_check_address(struct page *page, struct mm_struct *mm,
				 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * We need the page_table_lock to protect us from page faults,
	 * munmap, fork, etc...
	 */
	spin_lock(&mm->page_table_lock);
	pgd = pgd_offset(mm, address);
	if (likely(pgd_present(*pgd))) {
		pud = pud_offset(pgd, address);
		if (likely(pud_present(*pud))) {
			pmd = pmd_offset(pud, address);
			if (likely(pmd_present(*pmd))) {
				pte = pte_offset_map(pmd, address);
				if (likely(pte_present(*pte) &&
					   page_to_pfn(page) == pte_pfn(*pte)))
					return pte;
				pte_unmap(pte);
			}
		}
	}
	spin_unlock(&mm->page_table_lock);
	return ERR_PTR(-ENOENT);
}
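
/*
 * Typical use of page_check_address, a sketch mirroring what
 * page_referenced_one and try_to_unmap_one do below:
 *
 *	pte = page_check_address(page, mm, address);
 *	if (!IS_ERR(pte)) {
 *		... examine or modify *pte ...
 *		pte_unmap(pte);
 *		spin_unlock(&mm->page_table_lock);
 *	}
 */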

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount, int ignore_token)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	int referenced = 0;

	if (!get_mm_counter(mm, rss))
		goto out;
	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address);
	if (!IS_ERR(pte)) {
		if (ptep_clear_flush_young(vma, address, pte))
			referenced++;

		if (mm != current->mm && !ignore_token && has_swap_token(mm))
			referenced++;

		(*mapcount)--;
		pte_unmap(pte);
		spin_unlock(&mm->page_table_lock);
	}
out:
	return referenced;
}

static int page_referenced_anon(struct page *page, int ignore_token)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount,
							ignore_token);
		if (!mapcount)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page, int ignore_token)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount,
							ignore_token);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @ignore_token: don't give the swap token holder's mappings extra credit
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked, int ignore_token)
{
	int referenced = 0;

	if (!swap_token_default_timeout)
		ignore_token = 1;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page, ignore_token);
		else if (is_locked)
			referenced += page_referenced_file(page, ignore_token);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page,
								ignore_token);
			unlock_page(page);
		}
	}
	return referenced;
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the mm->page_table_lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	pgoff_t index;

	BUG_ON(PageReserved(page));
	BUG_ON(!anon_vma);

	inc_mm_counter(vma->vm_mm, anon_rss);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
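	/*
	 * The index computed below is the page's offset within the vma's
	 * backing object, in PAGE_CACHE_SIZE units: effectively
	 * linear_page_index(vma, address), i.e. the inverse of the
	 * calculation vma_address() performs above.
	 */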
	index = (address - vma->vm_start) >> PAGE_SHIFT;
	index += vma->vm_pgoff;
	index >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;

	if (atomic_inc_and_test(&page->_mapcount)) {
		page->index = index;
		page->mapping = (struct address_space *) anon_vma;
		inc_page_state(nr_mapped);
	}
	/* else checking page index and mapping is racy */
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the mm->page_table_lock.
 */
void page_add_file_rmap(struct page *page)
{
	BUG_ON(PageAnon(page));
	if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))
		return;

	if (atomic_inc_and_test(&page->_mapcount))
		inc_page_state(nr_mapped);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * Caller needs to hold the mm->page_table_lock.
 */
void page_remove_rmap(struct page *page)
{
	BUG_ON(PageReserved(page));

	if (atomic_add_negative(-1, &page->_mapcount)) {
		BUG_ON(page_mapcount(page) < 0);
		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		dec_page_state(nr_mapped);
	}
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	int ret = SWAP_AGAIN;

	if (!get_mm_counter(mm, rss))
		goto out;
	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address);
	if (IS_ERR(pte))
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
			ptep_clear_flush_young(vma, address, pte)) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page->private };
		/*
		 * Store the swap location in the pte.
		 * See handle_pte_fault() ...
		 */
		BUG_ON(!PageSwapCache(page));
		swap_duplicate(entry);
		if (list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_add(&mm->mmlist, &init_mm.mmlist);
			spin_unlock(&mmlist_lock);
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
		dec_mm_counter(mm, anon_rss);
	}

	dec_mm_counter(mm, rss);
	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap(pte);
	spin_unlock(&mm->page_table_lock);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.   Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
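
/*
 * Example of the cluster arithmetic (assuming 4K pages and PMD_SIZE of at
 * least 128K): CLUSTER_SIZE is 32 * 4K = 128K, so each call to
 * try_to_unmap_cluster scans up to 32 ptes, and CLUSTER_MASK rounds
 * (vm_start + cursor) down to a 128K boundary - e.g. with a 128K-aligned
 * vm_start and a cursor of 0x21000, the scan starts at vm_start + 0x20000.
 */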

static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, *original_pte;
	pte_t pteval;
	struct page *page;
	unsigned long address;
	unsigned long end;
	unsigned long pfn;

	/*
	 * We need the page_table_lock to protect us from page faults,
	 * munmap, fork, etc...
	 */
	spin_lock(&mm->page_table_lock);

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out_unlock;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out_unlock;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto out_unlock;

	for (original_pte = pte = pte_offset_map(pmd, address);
			address < end; pte++, address += PAGE_SIZE) {

		if (!pte_present(*pte))
			continue;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		BUG_ON(PageAnon(page));
		if (PageReserved(page))
			continue;

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pfn);
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, rss);
		(*mapcount)--;
	}

	pte_unmap(original_pte);
out_unlock:
	spin_unlock(&mm->page_table_lock);
}

static int try_to_unmap_anon(struct page *page)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (get_mm_counter(vma->vm_mm, rss) &&
				cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (!(vma->vm_flags & VM_RESERVED))
			vma->vm_private_data = NULL;
	}
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page)
{
	int ret;

	BUG_ON(PageReserved(page));
	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page);
	else
		ret = try_to_unmap_file(page);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}