/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
	 * In this context, it doesn't matter that we scan the
	 * whole list at once. */
	int swap_cluster_max;

	int swappiness;

	int all_unreclaimable;
};

/*
 * The list of shrinker callbacks used to apply pressure to
 * ageable caches.
 */
struct shrinker {
	shrinker_t		shrinker;
	struct list_head	list;
	int			seeks;	/* seeks to recreate an obj */
	long			nr;	/* objs pending delete */
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
	struct shrinker *shrinker;

	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
	if (shrinker) {
		shrinker->shrinker = theshrinker;
		shrinker->seeks = seeks;
		shrinker->nr = 0;
		down_write(&shrinker_rwsem);
		list_add_tail(&shrinker->list, &shrinker_list);
		up_write(&shrinker_rwsem);
	}
	return shrinker;
}
EXPORT_SYMBOL(set_shrinker);

/*
 * Remove one
 */
void remove_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker);
}
EXPORT_SYMBOL(remove_shrinker);

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encounters mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);

		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "%s: nr=%ld\n",
					__FUNCTION__, shrinker->nr);
			shrinker->nr = max_pass;
		}

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrinker)(0, gfp_mask);
			shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}

/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}

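/*
 * A freeable pagecache page is referenced only by the caller that isolated
 * it and by the page cache itself, plus one extra reference for the
 * filesystem private data (buffer heads) when PagePrivate is set.  That is
 * what the page_count() test below checks for.
 */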
static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}

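/*
 * Writeback to a congested backing device is skipped during reclaim,
 * except for PF_SWAPWRITE tasks and for writes to the device the task
 * is already writing to (current->backing_dev_info), where blocking
 * would happen anyway.
 */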
static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in generic_file_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling. This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 * See swapfile.c:page_queue_congested().
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (PagePrivate(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __FUNCTION__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.nonblocking = 1,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}
		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	write_lock_irq(&mapping->tree_lock);
	/*
	 * The non-racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_count.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under tree_lock, then this ordering is not required.
	 */
	if (unlikely(page_count(page) != 2))
		goto cannot_free;
	smp_rmb();
	if (unlikely(PageDirty(page)))
		goto cannot_free;

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		write_unlock_irq(&mapping->tree_lock);
		swap_free(swap);
		__put_page(page);	/* The pagecache ref */
		return 1;
	}

	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	__put_page(page);
	return 1;

cannot_free:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
					struct scan_control *sc)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	unsigned long nr_reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;

		VM_BUG_ON(PageActive(page));

		sc->nr_scanned++;

		if (!sc->may_swap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		if (PageWriteback(page))
			goto keep_locked;

		referenced = page_referenced(page, 1);
		/* In active use or really unfreeable?  Activate it. */
		if (referenced && page_mapping_inuse(page))
			goto activate_locked;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page))
			if (!add_to_swap(page, GFP_ATOMIC))
				goto activate_locked;
#endif /* CONFIG_SWAP */

		mapping = page_mapping(page);
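		/*
		 * Filesystem code may only be entered from here (for
		 * writeback or swap I/O) if the caller's gfp_mask allows
		 * it: __GFP_FS in general, or __GFP_IO for swapcache pages.
		 */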
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, 0)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (TestSetPageLocked(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!mapping || !remove_mapping(mapping, page))
			goto keep_locked;

free_it:
		unlock_page(page);
		nr_reclaimed++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	count_vm_events(PGACTIVATE, pgactivate);
	return nr_reclaimed;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct list_head *src, struct list_head *dst,
		unsigned long *scanned)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct list_head *target;
		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		list_del(&page->lru);
		target = src;
		if (likely(get_page_unless_zero(page))) {
			/*
			 * Be careful not to clear PageLRU until after we're
			 * sure the page is not being freed elsewhere -- the
			 * page release code relies on it.
			 */
			ClearPageLRU(page);
			target = dst;
			nr_taken++;
		} /* else it is being freed elsewhere */

		list_add(&page->lru, target);
	}

	*scanned = scan;
	return nr_taken;
}

/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */
static unsigned long shrink_inactive_list(unsigned long max_scan,
				struct zone *zone, struct scan_control *sc)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	unsigned long nr_scanned = 0;
	unsigned long nr_reclaimed = 0;

	pagevec_init(&pvec, 1);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	do {
		struct page *page;
		unsigned long nr_taken;
		unsigned long nr_scan;
		unsigned long nr_freed;

		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
					     &zone->inactive_list,
					     &page_list, &nr_scan);
		zone->nr_inactive -= nr_taken;
		zone->pages_scanned += nr_scan;
		spin_unlock_irq(&zone->lru_lock);

		nr_scanned += nr_scan;
		nr_freed = shrink_page_list(&page_list, sc);
		nr_reclaimed += nr_freed;
		local_irq_disable();
		if (current_is_kswapd()) {
			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
			__count_vm_events(KSWAPD_STEAL, nr_freed);
		} else
			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
		__count_vm_events(PGACTIVATE, nr_freed);

		if (nr_taken == 0)
			goto done;

		spin_lock(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = lru_to_page(&page_list);
			VM_BUG_ON(PageLRU(page));
			SetPageLRU(page);
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	} while (nr_scanned < max_scan);
	spin_unlock(&zone->lru_lock);
done:
	local_irq_enable();
	pagevec_release(&pvec);
	return nr_reclaimed;
}

/*
 * We are about to scan this zone at a certain priority level.  If that priority
 * level is smaller (ie: more urgent) than the previous priority, then note
 * that priority level within the zone.  This is done so that when the next
 * process comes in to scan this zone, it will immediately start out at this
 * priority level rather than having to build up its own scanning priority.
 * Here, this priority affects only the reclaim-mapped threshold.
 */
static inline void note_zone_scanning_priority(struct zone *zone, int priority)
{
	if (priority < zone->prev_priority)
		zone->prev_priority = priority;
}

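/*
 * The zone is treated as being close to OOM once reclaim has scanned the
 * equivalent of its entire LRU (active + inactive) three times over.
 */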
static inline int zone_is_near_oom(struct zone *zone)
{
	return zone->pages_scanned >= (zone->nr_active + zone->nr_inactive)*3;
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
				struct scan_control *sc, int priority)
{
	unsigned long pgmoved;
	int pgdeactivate = 0;
	unsigned long pgscanned;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
	struct page *page;
	struct pagevec pvec;
	int reclaim_mapped = 0;

	if (sc->may_swap) {
		long mapped_ratio;
		long distress;
		long swap_tendency;

		if (zone_is_near_oom(zone))
			goto force_reclaim_mapped;

		/*
		 * `distress' is a measure of how much trouble we're having
		 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
		 */
		distress = 100 >> min(zone->prev_priority, priority);

		/*
		 * The point of this algorithm is to decide when to start
		 * reclaiming mapped memory instead of just pagecache.  Work
		 * out how much memory is mapped.
		 */
		mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
				global_page_state(NR_ANON_PAGES)) * 100) /
					vm_total_pages;

		/*
		 * Now decide how much we really want to unmap some pages.  The
		 * mapped ratio is downgraded - just because there's a lot of
		 * mapped memory doesn't necessarily mean that page reclaim
		 * isn't succeeding.
		 *
		 * The distress ratio is important - we don't want to start
		 * going oom.
		 *
		 * A 100% value of vm_swappiness overrides this algorithm
		 * altogether.
		 */
		swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;

		/*
		 * Now use this metric to decide whether to start moving mapped
		 * memory onto the inactive list.
		 */
		if (swap_tendency >= 100)
force_reclaim_mapped:
			reclaim_mapped = 1;
	}

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
				    &l_hold, &pgscanned);
	zone->pages_scanned += pgscanned;
	zone->nr_active -= pgmoved;
	spin_unlock_irq(&zone->lru_lock);

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);
		if (page_mapped(page)) {
			if (!reclaim_mapped ||
			    (total_swap_pages == 0 && PageAnon(page)) ||
			    page_referenced(page, 0)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}
		list_add(&page->lru, &l_inactive);
	}

	pagevec_init(&pvec, 1);
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = lru_to_page(&l_inactive);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		VM_BUG_ON(!PageActive(page));
		ClearPageActive(page);

		list_move(&page->lru, &zone->inactive_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_inactive += pgmoved;
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_inactive += pgmoved;
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}

	pgmoved = 0;
	while (!list_empty(&l_active)) {
		page = lru_to_page(&l_active);
		prefetchw_prev_lru_page(page, &l_active, flags);
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		VM_BUG_ON(!PageActive(page));
		list_move(&page->lru, &zone->active_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_active += pgmoved;
			pgmoved = 0;
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_active += pgmoved;

	__count_zone_vm_events(PGREFILL, zone, pgscanned);
	__count_vm_events(PGDEACTIVATE, pgdeactivate);
	spin_unlock_irq(&zone->lru_lock);

	pagevec_release(&pvec);
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static unsigned long shrink_zone(int priority, struct zone *zone,
				struct scan_control *sc)
{
	unsigned long nr_active;
	unsigned long nr_inactive;
	unsigned long nr_to_scan;
	unsigned long nr_reclaimed = 0;

	atomic_inc(&zone->reclaim_in_progress);

	/*
	 * Add one to `nr_to_scan' just to make sure that the kernel will
	 * slowly sift through the active list.
	 */
	zone->nr_scan_active += (zone->nr_active >> priority) + 1;
	nr_active = zone->nr_scan_active;
	if (nr_active >= sc->swap_cluster_max)
		zone->nr_scan_active = 0;
	else
		nr_active = 0;

	zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1;
	nr_inactive = zone->nr_scan_inactive;
	if (nr_inactive >= sc->swap_cluster_max)
		zone->nr_scan_inactive = 0;
	else
		nr_inactive = 0;

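	/*
	 * Scan the two lists in chunks of at most sc->swap_cluster_max
	 * pages, alternating between them until both quotas are used up.
	 */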
	while (nr_active || nr_inactive) {
		if (nr_active) {
			nr_to_scan = min(nr_active,
					(unsigned long)sc->swap_cluster_max);
			nr_active -= nr_to_scan;
			shrink_active_list(nr_to_scan, zone, sc, priority);
		}

		if (nr_inactive) {
			nr_to_scan = min(nr_inactive,
					(unsigned long)sc->swap_cluster_max);
			nr_inactive -= nr_to_scan;
			nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
								sc);
		}
	}

	throttle_vm_writeout();

	atomic_dec(&zone->reclaim_in_progress);
	return nr_reclaimed;
}

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * Returns the number of reclaimed pages.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static unsigned long shrink_zones(int priority, struct zone **zones,
					struct scan_control *sc)
{
	unsigned long nr_reclaimed = 0;
	int i;

	sc->all_unreclaimable = 1;
	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!populated_zone(zone))
			continue;

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		note_zone_scanning_priority(zone, priority);

		if (zone->all_unreclaimable && priority != DEF_PRIORITY)
			continue;	/* Let kswapd poll it */

		sc->all_unreclaimable = 0;

		nr_reclaimed += shrink_zone(priority, zone, sc);
	}
	return nr_reclaimed;
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 */
unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
{
	int priority;
	int ret = 0;
	unsigned long total_scanned = 0;
	unsigned long nr_reclaimed = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	unsigned long lru_pages = 0;
	int i;
	struct scan_control sc = {
		.gfp_mask = gfp_mask,
		.may_writepage = !laptop_mode,
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.may_swap = 1,
		.swappiness = vm_swappiness,
	};

	count_vm_event(ALLOCSTALL);

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		lru_pages += zone->nr_active + zone->nr_inactive;
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc.nr_scanned = 0;
		if (!priority)
			disable_swap_token();
		nr_reclaimed += shrink_zones(priority, zones, &sc);
		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
		if (reclaim_state) {
			nr_reclaimed += reclaim_state->reclaimed_slab;
			reclaim_state->reclaimed_slab = 0;
		}
		total_scanned += sc.nr_scanned;
		if (nr_reclaimed >= sc.swap_cluster_max) {
			ret = 1;
			goto out;
		}

		/*
		 * Try to write back as many pages as we just scanned.  This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice.   But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout.  So in laptop mode, write out the whole world.
		 */
		if (total_scanned > sc.swap_cluster_max +
|  | 1066 | sc.swap_cluster_max / 2) { | 
| Pekka J Enberg | 687a21c | 2005-06-28 20:44:55 -0700 | [diff] [blame] | 1067 | wakeup_pdflush(laptop_mode ? 0 : total_scanned); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1068 | sc.may_writepage = 1; | 
|  | 1069 | } | 
|  | 1070 |  | 
|  | 1071 | /* Take a nap, wait for some writeback to complete */ | 
|  | 1072 | if (sc.nr_scanned && priority < DEF_PRIORITY - 2) | 
| Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1073 | congestion_wait(WRITE, HZ/10); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1074 | } | 
| Nick Piggin | 408d854 | 2006-09-25 23:31:27 -0700 | [diff] [blame] | 1075 | /* top priority shrink_zones still had more to do? don't OOM, then */ | 
|  | 1076 | if (!sc.all_unreclaimable) | 
|  | 1077 | ret = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1078 | out: | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1079 | /* | 
|  | 1080 | * Now that we've scanned all the zones at this priority level, note | 
|  | 1081 | * that level within the zone so that the next thread which performs | 
|  | 1082 | * scanning of this zone will immediately start out at this priority | 
|  | 1083 | * level.  This affects only the decision whether or not to bring | 
|  | 1084 | * mapped pages onto the inactive list. | 
|  | 1085 | */ | 
|  | 1086 | if (priority < 0) | 
|  | 1087 | priority = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1088 | for (i = 0; zones[i] != NULL; i++) { | 
|  | 1089 | struct zone *zone = zones[i]; | 
|  | 1090 |  | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 1091 | if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1092 | continue; | 
|  | 1093 |  | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1094 | zone->prev_priority = priority; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1095 | } | 
|  | 1096 | return ret; | 
|  | 1097 | } | 
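/*
 * Illustrative sketch, not part of vmscan.c: the wakeup_pdflush() trigger
 * in try_to_free_pages() above fires once total_scanned exceeds one and a
 * half times sc.swap_cluster_max.  The stand-alone program below assumes
 * SWAP_CLUSTER_MAX is 32 (its usual value) just to show the arithmetic.
 */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32	/* assumed; really defined in <linux/swap.h> */

int main(void)
{
	unsigned long swap_cluster_max = SWAP_CLUSTER_MAX;
	unsigned long threshold = swap_cluster_max + swap_cluster_max / 2;
	unsigned long total_scanned;

	/* Past the threshold, direct reclaim asks pdflush for background
	 * writeout and allows itself to call writepage. */
	for (total_scanned = 32; total_scanned <= 64; total_scanned += 16)
		printf("scanned %3lu: %s\n", total_scanned,
		       total_scanned > threshold ? "wake pdflush" : "keep scanning");
	return 0;
}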
|  | 1098 |  | 
|  | 1099 | /* | 
|  | 1100 | * For kswapd, balance_pgdat() will work across all this node's zones until | 
|  | 1101 | * they are all at pages_high. | 
|  | 1102 | * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 | * Returns the number of pages which were actually freed. | 
|  | 1104 | * | 
|  | 1105 | * There is special handling here for zones which are full of pinned pages. | 
|  | 1106 | * This can happen if the pages are all mlocked, or if they are all used by | 
|  | 1107 | * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb. | 
|  | 1108 | * What we do is to detect the case where all pages in the zone have been | 
|  | 1109 | * scanned twice and there has been zero successful reclaim.  Mark the zone as | 
|  | 1110 | * dead and from now on, only perform a short scan.  Basically we're polling | 
|  | 1111 | * the zone for when the problem goes away. | 
|  | 1112 | * | 
|  | 1113 | * kswapd scans the zones in the highmem->normal->dma direction.  It skips | 
|  | 1114 | * zones which have free_pages > pages_high, but once a zone is found to have | 
|  | 1115 | * free_pages <= pages_high, we scan that zone and the lower zones regardless | 
|  | 1116 | * of the number of free pages in the lower zones.  This interoperates with | 
|  | 1117 | * the page allocator fallback scheme to ensure that aging of pages is balanced | 
|  | 1118 | * across the zones. | 
|  | 1119 | */ | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1120 | static unsigned long balance_pgdat(pg_data_t *pgdat, int order) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1121 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1122 | int all_zones_ok; | 
|  | 1123 | int priority; | 
|  | 1124 | int i; | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1125 | unsigned long total_scanned; | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1126 | unsigned long nr_reclaimed; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 | struct reclaim_state *reclaim_state = current->reclaim_state; | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1128 | struct scan_control sc = { | 
|  | 1129 | .gfp_mask = GFP_KERNEL, | 
|  | 1130 | .may_swap = 1, | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1131 | .swap_cluster_max = SWAP_CLUSTER_MAX, | 
|  | 1132 | .swappiness = vm_swappiness, | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1133 | }; | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1134 | /* | 
|  | 1135 | * temp_priority is used to remember the scanning priority at which | 
|  | 1136 | * this zone was successfully refilled to free_pages == pages_high. | 
|  | 1137 | */ | 
|  | 1138 | int temp_priority[MAX_NR_ZONES]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 |  | 
|  | 1140 | loop_again: | 
|  | 1141 | total_scanned = 0; | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1142 | nr_reclaimed = 0; | 
| Christoph Lameter | c0bbbc7 | 2006-06-11 15:22:26 -0700 | [diff] [blame] | 1143 | sc.may_writepage = !laptop_mode; | 
| Christoph Lameter | f8891e5 | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 1144 | count_vm_event(PAGEOUTRUN); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1145 |  | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1146 | for (i = 0; i < pgdat->nr_zones; i++) | 
|  | 1147 | temp_priority[i] = DEF_PRIORITY; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1148 |  | 
|  | 1149 | for (priority = DEF_PRIORITY; priority >= 0; priority--) { | 
|  | 1150 | int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */ | 
|  | 1151 | unsigned long lru_pages = 0; | 
|  | 1152 |  | 
| Rik van Riel | f7b7fd8 | 2005-11-28 13:44:07 -0800 | [diff] [blame] | 1153 | /* The swap token gets in the way of swapout... */ | 
|  | 1154 | if (!priority) | 
|  | 1155 | disable_swap_token(); | 
|  | 1156 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1157 | all_zones_ok = 1; | 
|  | 1158 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1159 | /* | 
|  | 1160 | * Scan in the highmem->dma direction for the highest | 
|  | 1161 | * zone which needs scanning | 
|  | 1162 | */ | 
|  | 1163 | for (i = pgdat->nr_zones - 1; i >= 0; i--) { | 
|  | 1164 | struct zone *zone = pgdat->node_zones + i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1165 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1166 | if (!populated_zone(zone)) | 
|  | 1167 | continue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1168 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1169 | if (zone->all_unreclaimable && priority != DEF_PRIORITY) | 
|  | 1170 | continue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1171 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1172 | if (!zone_watermark_ok(zone, order, zone->pages_high, | 
|  | 1173 | 0, 0)) { | 
|  | 1174 | end_zone = i; | 
|  | 1175 | goto scan; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1176 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1177 | } | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1178 | goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1179 | scan: | 
|  | 1180 | for (i = 0; i <= end_zone; i++) { | 
|  | 1181 | struct zone *zone = pgdat->node_zones + i; | 
|  | 1182 |  | 
|  | 1183 | lru_pages += zone->nr_active + zone->nr_inactive; | 
|  | 1184 | } | 
|  | 1185 |  | 
|  | 1186 | /* | 
|  | 1187 | * Now scan the zone in the dma->highmem direction, stopping | 
|  | 1188 | * at the last zone which needs scanning. | 
|  | 1189 | * | 
|  | 1190 | * We do this because the page allocator works in the opposite | 
|  | 1191 | * direction.  This prevents the page allocator from allocating | 
|  | 1192 | * pages behind kswapd's direction of progress, which would | 
|  | 1193 | * cause too much scanning of the lower zones. | 
|  | 1194 | */ | 
|  | 1195 | for (i = 0; i <= end_zone; i++) { | 
|  | 1196 | struct zone *zone = pgdat->node_zones + i; | 
| akpm@osdl.org | b15e090 | 2005-06-21 17:14:35 -0700 | [diff] [blame] | 1197 | int nr_slab; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1198 |  | 
| Con Kolivas | f3fe651 | 2006-01-06 00:11:15 -0800 | [diff] [blame] | 1199 | if (!populated_zone(zone)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1200 | continue; | 
|  | 1201 |  | 
|  | 1202 | if (zone->all_unreclaimable && priority != DEF_PRIORITY) | 
|  | 1203 | continue; | 
|  | 1204 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1205 | if (!zone_watermark_ok(zone, order, zone->pages_high, | 
|  | 1206 | end_zone, 0)) | 
|  | 1207 | all_zones_ok = 0; | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1208 | temp_priority[i] = priority; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1209 | sc.nr_scanned = 0; | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1210 | note_zone_scanning_priority(zone, priority); | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1211 | nr_reclaimed += shrink_zone(priority, zone, &sc); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1212 | reclaim_state->reclaimed_slab = 0; | 
| akpm@osdl.org | b15e090 | 2005-06-21 17:14:35 -0700 | [diff] [blame] | 1213 | nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, | 
|  | 1214 | lru_pages); | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1215 | nr_reclaimed += reclaim_state->reclaimed_slab; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1216 | total_scanned += sc.nr_scanned; | 
|  | 1217 | if (zone->all_unreclaimable) | 
|  | 1218 | continue; | 
| akpm@osdl.org | b15e090 | 2005-06-21 17:14:35 -0700 | [diff] [blame] | 1219 | if (nr_slab == 0 && zone->pages_scanned >= | 
| Nick Piggin | 4ff1ffb | 2006-09-25 23:31:28 -0700 | [diff] [blame] | 1220 | (zone->nr_active + zone->nr_inactive) * 6) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1221 | zone->all_unreclaimable = 1; | 
|  | 1222 | /* | 
|  | 1223 | * If we've done a decent amount of scanning and | 
|  | 1224 | * the reclaim ratio is low, start doing writepage | 
|  | 1225 | * even in laptop mode | 
|  | 1226 | */ | 
|  | 1227 | if (total_scanned > SWAP_CLUSTER_MAX * 2 && | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1228 | total_scanned > nr_reclaimed + nr_reclaimed / 2) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1229 | sc.may_writepage = 1; | 
|  | 1230 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1231 | if (all_zones_ok) | 
|  | 1232 | break;		/* kswapd: all done */ | 
|  | 1233 | /* | 
|  | 1234 | * OK, kswapd is getting into trouble.  Take a nap, then take | 
|  | 1235 | * another pass across the zones. | 
|  | 1236 | */ | 
|  | 1237 | if (total_scanned && priority < DEF_PRIORITY - 2) | 
| Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1238 | congestion_wait(WRITE, HZ/10); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1239 |  | 
|  | 1240 | /* | 
|  | 1241 | * We do this so kswapd doesn't build up large priorities for | 
|  | 1242 | * example when it is freeing in parallel with allocators. It | 
|  | 1243 | * matches the direct reclaim path behaviour in terms of impact | 
|  | 1244 | * on zone->*_priority. | 
|  | 1245 | */ | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1246 | if (nr_reclaimed >= SWAP_CLUSTER_MAX) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1247 | break; | 
|  | 1248 | } | 
|  | 1249 | out: | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1250 | /* | 
|  | 1251 | * Note within each zone the priority level at which this zone was | 
|  | 1252 | * brought into a happy state.  So that the next thread which scans this | 
|  | 1253 | * zone will start out at that priority level. | 
|  | 1254 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1255 | for (i = 0; i < pgdat->nr_zones; i++) { | 
|  | 1256 | struct zone *zone = pgdat->node_zones + i; | 
|  | 1257 |  | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1258 | zone->prev_priority = temp_priority[i]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1259 | } | 
|  | 1260 | if (!all_zones_ok) { | 
|  | 1261 | cond_resched(); | 
|  | 1262 | goto loop_again; | 
|  | 1263 | } | 
|  | 1264 |  | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1265 | return nr_reclaimed; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1266 | } | 
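/*
 * Illustrative sketch, not part of vmscan.c: balance_pgdat() above marks a
 * zone all_unreclaimable once slab reclaim freed nothing and the zone has
 * been scanned roughly six times over.  The predicate below restates that
 * test against a stub type; the struct is an assumption for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct zone_stub {			/* only the fields the test needs */
	unsigned long nr_active;
	unsigned long nr_inactive;
	unsigned long pages_scanned;
};

/* Give up on a zone only after slab reclaim made no progress and we have
 * scanned about six times its LRU pages without success. */
static bool zone_looks_unreclaimable(const struct zone_stub *z,
				     unsigned long nr_slab_freed)
{
	unsigned long lru = z->nr_active + z->nr_inactive;

	return nr_slab_freed == 0 && z->pages_scanned >= lru * 6;
}

int main(void)
{
	struct zone_stub z = { .nr_active = 100, .nr_inactive = 100,
			       .pages_scanned = 1200 };

	printf("%d\n", zone_looks_unreclaimable(&z, 0));	/* prints 1 */
	return 0;
}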
|  | 1267 |  | 
|  | 1268 | /* | 
|  | 1269 | * The background pageout daemon, started as a kernel thread | 
|  | 1270 | * from the init process. | 
|  | 1271 | * | 
|  | 1272 | * This basically trickles out pages so that we have _some_ | 
|  | 1273 | * free memory available even if there is no other activity | 
|  | 1274 | * that frees anything up. This is needed for things like routing | 
|  | 1275 | * etc, where we otherwise might have all activity going on in | 
|  | 1276 | * asynchronous contexts that cannot page things out. | 
|  | 1277 | * | 
|  | 1278 | * If there are applications that are active memory-allocators | 
|  | 1279 | * (most normal use), this basically shouldn't matter. | 
|  | 1280 | */ | 
|  | 1281 | static int kswapd(void *p) | 
|  | 1282 | { | 
|  | 1283 | unsigned long order; | 
|  | 1284 | pg_data_t *pgdat = (pg_data_t*)p; | 
|  | 1285 | struct task_struct *tsk = current; | 
|  | 1286 | DEFINE_WAIT(wait); | 
|  | 1287 | struct reclaim_state reclaim_state = { | 
|  | 1288 | .reclaimed_slab = 0, | 
|  | 1289 | }; | 
|  | 1290 | cpumask_t cpumask; | 
|  | 1291 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1292 | cpumask = node_to_cpumask(pgdat->node_id); | 
|  | 1293 | if (!cpus_empty(cpumask)) | 
|  | 1294 | set_cpus_allowed(tsk, cpumask); | 
|  | 1295 | current->reclaim_state = &reclaim_state; | 
|  | 1296 |  | 
|  | 1297 | /* | 
|  | 1298 | * Tell the memory management that we're a "memory allocator", | 
|  | 1299 | * and that if we need more memory we should get access to it | 
|  | 1300 | * regardless (see "__alloc_pages()"). "kswapd" should | 
|  | 1301 | * never get caught in the normal page freeing logic. | 
|  | 1302 | * | 
|  | 1303 | * (Kswapd normally doesn't need memory anyway, but sometimes | 
|  | 1304 | * you need a small amount of memory in order to be able to | 
|  | 1305 | * page out something else, and this flag essentially protects | 
|  | 1306 | * us from recursively trying to free more memory as we're | 
|  | 1307 | * trying to free the first piece of memory in the first place). | 
|  | 1308 | */ | 
| Christoph Lameter | 930d915 | 2006-01-08 01:00:47 -0800 | [diff] [blame] | 1309 | tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1310 |  | 
|  | 1311 | order = 0; | 
|  | 1312 | for ( ; ; ) { | 
|  | 1313 | unsigned long new_order; | 
| Christoph Lameter | 3e1d1d2 | 2005-06-24 23:13:50 -0700 | [diff] [blame] | 1314 |  | 
|  | 1315 | try_to_freeze(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1316 |  | 
|  | 1317 | prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); | 
|  | 1318 | new_order = pgdat->kswapd_max_order; | 
|  | 1319 | pgdat->kswapd_max_order = 0; | 
|  | 1320 | if (order < new_order) { | 
|  | 1321 | /* | 
|  | 1322 | * Don't sleep if someone wants a larger 'order' | 
|  | 1323 | * allocation | 
|  | 1324 | */ | 
|  | 1325 | order = new_order; | 
|  | 1326 | } else { | 
|  | 1327 | schedule(); | 
|  | 1328 | order = pgdat->kswapd_max_order; | 
|  | 1329 | } | 
|  | 1330 | finish_wait(&pgdat->kswapd_wait, &wait); | 
|  | 1331 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1332 | balance_pgdat(pgdat, order); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1333 | } | 
|  | 1334 | return 0; | 
|  | 1335 | } | 
|  | 1336 |  | 
|  | 1337 | /* | 
|  | 1338 | * A zone is low on free memory, so wake its kswapd task to service it. | 
|  | 1339 | */ | 
|  | 1340 | void wakeup_kswapd(struct zone *zone, int order) | 
|  | 1341 | { | 
|  | 1342 | pg_data_t *pgdat; | 
|  | 1343 |  | 
| Con Kolivas | f3fe651 | 2006-01-06 00:11:15 -0800 | [diff] [blame] | 1344 | if (!populated_zone(zone)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1345 | return; | 
|  | 1346 |  | 
|  | 1347 | pgdat = zone->zone_pgdat; | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1348 | if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1349 | return; | 
|  | 1350 | if (pgdat->kswapd_max_order < order) | 
|  | 1351 | pgdat->kswapd_max_order = order; | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 1352 | if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1353 | return; | 
| Con Kolivas | 8d0986e | 2005-09-13 01:25:07 -0700 | [diff] [blame] | 1354 | if (!waitqueue_active(&pgdat->kswapd_wait)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1355 | return; | 
| Con Kolivas | 8d0986e | 2005-09-13 01:25:07 -0700 | [diff] [blame] | 1356 | wake_up_interruptible(&pgdat->kswapd_wait); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1357 | } | 
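/*
 * Illustrative sketch, not part of vmscan.c: wakeup_kswapd() and the
 * kswapd() loop communicate through pgdat->kswapd_max_order.  The fragment
 * below replays that handshake with a plain variable standing in for the
 * pgdat field, to show why kswapd skips schedule() when a larger-order
 * request arrived while it was still balancing.
 */
#include <stdio.h>

static unsigned long kswapd_max_order;	/* stand-in for pgdat->kswapd_max_order */

/* Producer side (the allocator): remember the largest failed order. */
static void record_request(unsigned long order)
{
	if (kswapd_max_order < order)
		kswapd_max_order = order;
}

int main(void)
{
	unsigned long order = 0;	/* order kswapd last balanced for */
	unsigned long new_order;

	record_request(3);		/* an order-3 allocation hit pages_low */

	/* Consumer side (kswapd): take the pending order and clear it.
	 * If it grew, keep working instead of going back to sleep. */
	new_order = kswapd_max_order;
	kswapd_max_order = 0;
	if (order < new_order)
		order = new_order;
	printf("balance_pgdat() for order %lu\n", order);
	return 0;
}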
|  | 1358 |  | 
|  | 1359 | #ifdef CONFIG_PM | 
|  | 1360 | /* | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1361 | * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages | 
|  | 1362 | * from LRU lists system-wide, for the given pass and priority, and returns the | 
|  | 1363 | * number of reclaimed pages | 
|  | 1364 | * | 
|  | 1365 | * For pass > 3 we also try to shrink the LRU lists that contain only a few pages | 
|  | 1366 | */ | 
|  | 1367 | static unsigned long shrink_all_zones(unsigned long nr_pages, int pass, | 
|  | 1368 | int prio, struct scan_control *sc) | 
|  | 1369 | { | 
|  | 1370 | struct zone *zone; | 
|  | 1371 | unsigned long nr_to_scan, ret = 0; | 
|  | 1372 |  | 
|  | 1373 | for_each_zone(zone) { | 
|  | 1374 |  | 
|  | 1375 | if (!populated_zone(zone)) | 
|  | 1376 | continue; | 
|  | 1377 |  | 
|  | 1378 | if (zone->all_unreclaimable && prio != DEF_PRIORITY) | 
|  | 1379 | continue; | 
|  | 1380 |  | 
|  | 1381 | /* For pass = 0 we don't shrink the active list */ | 
|  | 1382 | if (pass > 0) { | 
|  | 1383 | zone->nr_scan_active += (zone->nr_active >> prio) + 1; | 
|  | 1384 | if (zone->nr_scan_active >= nr_pages || pass > 3) { | 
|  | 1385 | zone->nr_scan_active = 0; | 
|  | 1386 | nr_to_scan = min(nr_pages, zone->nr_active); | 
| Martin Bligh | bbdb396 | 2006-10-28 10:38:25 -0700 | [diff] [blame] | 1387 | shrink_active_list(nr_to_scan, zone, sc, prio); | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1388 | } | 
|  | 1389 | } | 
|  | 1390 |  | 
|  | 1391 | zone->nr_scan_inactive += (zone->nr_inactive >> prio) + 1; | 
|  | 1392 | if (zone->nr_scan_inactive >= nr_pages || pass > 3) { | 
|  | 1393 | zone->nr_scan_inactive = 0; | 
|  | 1394 | nr_to_scan = min(nr_pages, zone->nr_inactive); | 
|  | 1395 | ret += shrink_inactive_list(nr_to_scan, zone, sc); | 
|  | 1396 | if (ret >= nr_pages) | 
|  | 1397 | return ret; | 
|  | 1398 | } | 
|  | 1399 | } | 
|  | 1400 |  | 
|  | 1401 | return ret; | 
|  | 1402 | } | 
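/*
 * Illustrative sketch, not part of vmscan.c: shrink_all_zones() above
 * batches work by adding (list_size >> prio) + 1 to nr_scan_* on every
 * call, so high priority values nibble at a list while priority 0 covers
 * it completely.  Assuming DEF_PRIORITY is 12, the batch sizes for a
 * million-page list look like this:
 */
#include <stdio.h>

#define DEF_PRIORITY 12		/* assumed; the usual kernel value */

int main(void)
{
	unsigned long nr_inactive = 1000000;	/* pages on the LRU list */
	int prio;

	for (prio = DEF_PRIORITY; prio >= 0; prio--)
		printf("prio %2d: batch of %lu pages\n",
		       prio, (nr_inactive >> prio) + 1);
	return 0;
}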
|  | 1403 |  | 
|  | 1404 | /* | 
|  | 1405 | * Try to free `nr_pages' of memory, system-wide, and return the number of | 
|  | 1406 | * freed pages. | 
|  | 1407 | * | 
|  | 1408 | * Rather than trying to age LRUs the aim is to preserve the overall | 
|  | 1409 | * LRU order by reclaiming preferentially | 
|  | 1410 | * inactive > active > active referenced > active mapped | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1411 | */ | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1412 | unsigned long shrink_all_memory(unsigned long nr_pages) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1413 | { | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1414 | unsigned long lru_pages, nr_slab; | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1415 | unsigned long ret = 0; | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1416 | int pass; | 
|  | 1417 | struct reclaim_state reclaim_state; | 
|  | 1418 | struct zone *zone; | 
|  | 1419 | struct scan_control sc = { | 
|  | 1420 | .gfp_mask = GFP_KERNEL, | 
|  | 1421 | .may_swap = 0, | 
|  | 1422 | .swap_cluster_max = nr_pages, | 
|  | 1423 | .may_writepage = 1, | 
|  | 1424 | .swappiness = vm_swappiness, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1425 | }; | 
|  | 1426 |  | 
|  | 1427 | current->reclaim_state = &reclaim_state; | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1428 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1429 | lru_pages = 0; | 
|  | 1430 | for_each_zone(zone) | 
|  | 1431 | lru_pages += zone->nr_active + zone->nr_inactive; | 
|  | 1432 |  | 
| Christoph Lameter | 972d1a7 | 2006-09-25 23:31:51 -0700 | [diff] [blame] | 1433 | nr_slab = global_page_state(NR_SLAB_RECLAIMABLE); | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1434 | /* If slab caches are huge, it's better to hit them first */ | 
|  | 1435 | while (nr_slab >= lru_pages) { | 
|  | 1436 | reclaim_state.reclaimed_slab = 0; | 
|  | 1437 | shrink_slab(nr_pages, sc.gfp_mask, lru_pages); | 
|  | 1438 | if (!reclaim_state.reclaimed_slab) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1439 | break; | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1440 |  | 
|  | 1441 | ret += reclaim_state.reclaimed_slab; | 
|  | 1442 | if (ret >= nr_pages) | 
|  | 1443 | goto out; | 
|  | 1444 |  | 
|  | 1445 | nr_slab -= reclaim_state.reclaimed_slab; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1446 | } | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1447 |  | 
|  | 1448 | /* | 
|  | 1449 | * We try to shrink LRUs in 5 passes: | 
|  | 1450 | * 0 = Reclaim from inactive_list only | 
|  | 1451 | * 1 = Reclaim from active list but don't reclaim mapped | 
|  | 1452 | * 2 = 2nd pass of type 1 | 
|  | 1453 | * 3 = Reclaim mapped (normal reclaim) | 
|  | 1454 | * 4 = 2nd pass of type 3 | 
|  | 1455 | */ | 
|  | 1456 | for (pass = 0; pass < 5; pass++) { | 
|  | 1457 | int prio; | 
|  | 1458 |  | 
|  | 1459 | /* Needed for shrinking slab caches later on */ | 
|  | 1460 | if (!lru_pages) | 
|  | 1461 | for_each_zone(zone) { | 
|  | 1462 | lru_pages += zone->nr_active; | 
|  | 1463 | lru_pages += zone->nr_inactive; | 
|  | 1464 | } | 
|  | 1465 |  | 
|  | 1466 | /* Force reclaiming mapped pages in the passes #3 and #4 */ | 
|  | 1467 | if (pass > 2) { | 
|  | 1468 | sc.may_swap = 1; | 
|  | 1469 | sc.swappiness = 100; | 
|  | 1470 | } | 
|  | 1471 |  | 
|  | 1472 | for (prio = DEF_PRIORITY; prio >= 0; prio--) { | 
|  | 1473 | unsigned long nr_to_scan = nr_pages - ret; | 
|  | 1474 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1475 | sc.nr_scanned = 0; | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1476 | ret += shrink_all_zones(nr_to_scan, pass, prio, &sc); | 
|  | 1477 | if (ret >= nr_pages) | 
|  | 1478 | goto out; | 
|  | 1479 |  | 
|  | 1480 | reclaim_state.reclaimed_slab = 0; | 
|  | 1481 | shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages); | 
|  | 1482 | ret += reclaim_state.reclaimed_slab; | 
|  | 1483 | if (ret >= nr_pages) | 
|  | 1484 | goto out; | 
|  | 1485 |  | 
|  | 1486 | if (sc.nr_scanned && prio < DEF_PRIORITY - 2) | 
| Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1487 | congestion_wait(WRITE, HZ / 10); | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1488 | } | 
|  | 1489 |  | 
|  | 1490 | lru_pages = 0; | 
| Rafael J. Wysocki | 248a030 | 2006-03-22 00:09:04 -0800 | [diff] [blame] | 1491 | } | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1492 |  | 
|  | 1493 | /* | 
|  | 1494 | * If ret = 0, we could not shrink LRUs, but there may be something | 
|  | 1495 | * in slab caches | 
|  | 1496 | */ | 
|  | 1497 | if (!ret) | 
|  | 1498 | do { | 
|  | 1499 | reclaim_state.reclaimed_slab = 0; | 
|  | 1500 | shrink_slab(nr_pages, sc.gfp_mask, lru_pages); | 
|  | 1501 | ret += reclaim_state.reclaimed_slab; | 
|  | 1502 | } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0); | 
|  | 1503 |  | 
|  | 1504 | out: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1505 | current->reclaim_state = NULL; | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1506 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1507 | return ret; | 
|  | 1508 | } | 
|  | 1509 | #endif | 
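/*
 * Illustrative sketch, not part of vmscan.c: shrink_all_memory() exists
 * for the suspend/hibernation path, which asks for a batch of pages and
 * retries while progress is made.  The caller below is hypothetical and
 * the shrink_all_memory() stub only fakes diminishing returns; the real
 * policy lives in kernel/power/ and differs.
 */
#include <stdio.h>

/* Stub standing in for the kernel function: pretend we reclaim half of
 * what was asked for each time. */
static unsigned long shrink_all_memory(unsigned long nr_pages)
{
	return nr_pages / 2;
}

int main(void)
{
	unsigned long needed = 1000;	/* pages needed for the image */
	unsigned long freed_total = 0;

	while (freed_total < needed) {
		unsigned long freed = shrink_all_memory(needed - freed_total);

		if (!freed)		/* no forward progress: give up */
			break;
		freed_total += freed;
	}
	printf("freed %lu of %lu pages\n", freed_total, needed);
	return 0;
}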
|  | 1510 |  | 
|  | 1511 | #ifdef CONFIG_HOTPLUG_CPU | 
|  | 1512 | /* It's optimal to keep kswapds on the same CPUs as their memory, but | 
|  | 1513 | not required for correctness.  So if the last cpu in a node goes | 
|  | 1514 | away, kswapd is allowed to run anywhere; as the first cpu in that node comes back, | 
|  | 1515 | restore its cpu binding. */ | 
| Chandra Seetharaman | 9c7b216 | 2006-06-27 02:54:07 -0700 | [diff] [blame] | 1516 | static int __devinit cpu_callback(struct notifier_block *nfb, | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1517 | unsigned long action, void *hcpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1518 | { | 
|  | 1519 | pg_data_t *pgdat; | 
|  | 1520 | cpumask_t mask; | 
|  | 1521 |  | 
|  | 1522 | if (action == CPU_ONLINE) { | 
| KAMEZAWA Hiroyuki | ec936fc | 2006-03-27 01:15:59 -0800 | [diff] [blame] | 1523 | for_each_online_pgdat(pgdat) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1524 | mask = node_to_cpumask(pgdat->node_id); | 
|  | 1525 | if (any_online_cpu(mask) != NR_CPUS) | 
|  | 1526 | /* One of our CPUs online: restore mask */ | 
|  | 1527 | set_cpus_allowed(pgdat->kswapd, mask); | 
|  | 1528 | } | 
|  | 1529 | } | 
|  | 1530 | return NOTIFY_OK; | 
|  | 1531 | } | 
|  | 1532 | #endif /* CONFIG_HOTPLUG_CPU */ | 
|  | 1533 |  | 
| Yasunori Goto | 3218ae1 | 2006-06-27 02:53:33 -0700 | [diff] [blame] | 1534 | /* | 
|  | 1535 | * This kswapd start function will be called by init and node-hot-add. | 
|  | 1536 | * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added. | 
|  | 1537 | */ | 
|  | 1538 | int kswapd_run(int nid) | 
|  | 1539 | { | 
|  | 1540 | pg_data_t *pgdat = NODE_DATA(nid); | 
|  | 1541 | int ret = 0; | 
|  | 1542 |  | 
|  | 1543 | if (pgdat->kswapd) | 
|  | 1544 | return 0; | 
|  | 1545 |  | 
|  | 1546 | pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); | 
|  | 1547 | if (IS_ERR(pgdat->kswapd)) { | 
|  | 1548 | /* failure at boot is fatal */ | 
|  | 1549 | BUG_ON(system_state == SYSTEM_BOOTING); | 
|  | 1550 | printk("Failed to start kswapd on node %d\n", nid); | 
|  | 1551 | ret = -1; | 
|  | 1552 | } | 
|  | 1553 | return ret; | 
|  | 1554 | } | 
|  | 1555 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1556 | static int __init kswapd_init(void) | 
|  | 1557 | { | 
| Yasunori Goto | 3218ae1 | 2006-06-27 02:53:33 -0700 | [diff] [blame] | 1558 | int nid; | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1559 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1560 | swap_setup(); | 
| Yasunori Goto | 3218ae1 | 2006-06-27 02:53:33 -0700 | [diff] [blame] | 1561 | for_each_online_node(nid) | 
|  | 1562 | kswapd_run(nid); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1563 | hotcpu_notifier(cpu_callback, 0); | 
|  | 1564 | return 0; | 
|  | 1565 | } | 
|  | 1566 |  | 
|  | 1567 | module_init(kswapd_init) | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1568 |  | 
|  | 1569 | #ifdef CONFIG_NUMA | 
|  | 1570 | /* | 
|  | 1571 | * Zone reclaim mode | 
|  | 1572 | * | 
|  | 1573 | * If non-zero call zone_reclaim when the number of free pages falls below | 
|  | 1574 | * the watermarks. | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1575 | */ | 
|  | 1576 | int zone_reclaim_mode __read_mostly; | 
|  | 1577 |  | 
| Christoph Lameter | 1b2ffb7 | 2006-02-01 03:05:34 -0800 | [diff] [blame] | 1578 | #define RECLAIM_OFF 0 | 
|  | 1579 | #define RECLAIM_ZONE (1<<0)	/* Run shrink_cache on the zone */ | 
|  | 1580 | #define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */ | 
|  | 1581 | #define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */ | 
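/*
 * Illustrative sketch, not part of vmscan.c: zone_reclaim_mode is a
 * bitmask of the RECLAIM_* flags above, so e.g. a sysctl value of 3
 * means "run zone reclaim and allow writeout, but no swap".  The flag
 * values are repeated below so the sketch compiles on its own.
 */
#include <stdio.h>

#define RECLAIM_ZONE	(1 << 0)
#define RECLAIM_WRITE	(1 << 1)
#define RECLAIM_SWAP	(1 << 2)

int main(void)
{
	int mode = 3;	/* e.g. vm.zone_reclaim_mode = 3 */

	printf("run zone reclaim: %d\n", !!(mode & RECLAIM_ZONE));
	printf("write out pages:  %d\n", !!(mode & RECLAIM_WRITE));
	printf("swap pages out:   %d\n", !!(mode & RECLAIM_SWAP));
	return 0;
}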
|  | 1582 |  | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1583 | /* | 
| Christoph Lameter | a92f712 | 2006-02-01 03:05:32 -0800 | [diff] [blame] | 1584 | * Priority for ZONE_RECLAIM. This determines the fraction of pages | 
|  | 1585 | * of a zone considered in each zone_reclaim pass. 4 scans 1/16th of | 
|  | 1586 | * the zone. | 
|  | 1587 | */ | 
|  | 1588 | #define ZONE_RECLAIM_PRIORITY 4 | 
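/*
 * Illustrative sketch, not part of vmscan.c: with ZONE_RECLAIM_PRIORITY
 * at 4, shrink_zone() considers roughly zone_size >> 4, i.e. 1/16th of
 * the zone, per pass.  A worked number, assuming a 262144-page zone
 * (1 GB with 4 KB pages):
 */
#include <stdio.h>

#define ZONE_RECLAIM_PRIORITY 4

int main(void)
{
	unsigned long zone_pages = 262144;	/* assumed zone size */

	printf("per-pass scan target: %lu pages (1/%lu of the zone)\n",
	       zone_pages >> ZONE_RECLAIM_PRIORITY,
	       1UL << ZONE_RECLAIM_PRIORITY);
	return 0;
}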
|  | 1589 |  | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1590 | /* | 
| Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 1591 | * Percentage of pages in a zone that must be unmapped for zone_reclaim to | 
|  | 1592 | * occur. | 
|  | 1593 | */ | 
|  | 1594 | int sysctl_min_unmapped_ratio = 1; | 
|  | 1595 |  | 
|  | 1596 | /* | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 1597 | * If the number of slab pages in a zone grows beyond this percentage then | 
|  | 1598 | * slab reclaim needs to occur. | 
|  | 1599 | */ | 
|  | 1600 | int sysctl_min_slab_ratio = 5; | 
|  | 1601 |  | 
|  | 1602 | /* | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1603 | * Try to free up some pages from this zone through reclaim. | 
|  | 1604 | */ | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1605 | static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1606 | { | 
| Christoph Lameter | 7fb2d46 | 2006-03-22 00:08:22 -0800 | [diff] [blame] | 1607 | /* Minimum pages needed in order to stay on node */ | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1608 | const unsigned long nr_pages = 1 << order; | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1609 | struct task_struct *p = current; | 
|  | 1610 | struct reclaim_state reclaim_state; | 
| Christoph Lameter | 8695949 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1611 | int priority; | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1612 | unsigned long nr_reclaimed = 0; | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1613 | struct scan_control sc = { | 
|  | 1614 | .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), | 
|  | 1615 | .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP), | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1616 | .swap_cluster_max = max_t(unsigned long, nr_pages, | 
|  | 1617 | SWAP_CLUSTER_MAX), | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1618 | .gfp_mask = gfp_mask, | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1619 | .swappiness = vm_swappiness, | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1620 | }; | 
| Christoph Lameter | 83e33a4 | 2006-09-25 23:31:53 -0700 | [diff] [blame] | 1621 | unsigned long slab_reclaimable; | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1622 |  | 
|  | 1623 | disable_swap_token(); | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1624 | cond_resched(); | 
| Christoph Lameter | d4f7796 | 2006-02-24 13:04:22 -0800 | [diff] [blame] | 1625 | /* | 
|  | 1626 | * We need to be able to allocate from the reserves for RECLAIM_SWAP | 
|  | 1627 | * and we also need to be able to write out pages for RECLAIM_WRITE | 
|  | 1628 | * and RECLAIM_SWAP. | 
|  | 1629 | */ | 
|  | 1630 | p->flags |= PF_MEMALLOC | PF_SWAPWRITE; | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1631 | reclaim_state.reclaimed_slab = 0; | 
|  | 1632 | p->reclaim_state = &reclaim_state; | 
| Christoph Lameter | c84db23 | 2006-02-01 03:05:29 -0800 | [diff] [blame] | 1633 |  | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 1634 | if (zone_page_state(zone, NR_FILE_PAGES) - | 
|  | 1635 | zone_page_state(zone, NR_FILE_MAPPED) > | 
|  | 1636 | zone->min_unmapped_pages) { | 
|  | 1637 | /* | 
|  | 1638 | * Free memory by calling shrink zone with increasing | 
|  | 1639 | * priorities until we have enough memory freed. | 
|  | 1640 | */ | 
|  | 1641 | priority = ZONE_RECLAIM_PRIORITY; | 
|  | 1642 | do { | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1643 | note_zone_scanning_priority(zone, priority); | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 1644 | nr_reclaimed += shrink_zone(priority, zone, &sc); | 
|  | 1645 | priority--; | 
|  | 1646 | } while (priority >= 0 && nr_reclaimed < nr_pages); | 
|  | 1647 | } | 
| Christoph Lameter | c84db23 | 2006-02-01 03:05:29 -0800 | [diff] [blame] | 1648 |  | 
| Christoph Lameter | 83e33a4 | 2006-09-25 23:31:53 -0700 | [diff] [blame] | 1649 | slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE); | 
|  | 1650 | if (slab_reclaimable > zone->min_slab_pages) { | 
| Christoph Lameter | 2a16e3f | 2006-02-01 03:05:35 -0800 | [diff] [blame] | 1651 | /* | 
| Christoph Lameter | 7fb2d46 | 2006-03-22 00:08:22 -0800 | [diff] [blame] | 1652 | * shrink_slab() does not currently allow us to determine how | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 1653 | * many pages were freed in this zone. So we take the current | 
|  | 1654 | * number of slab pages and shake the slab until it is reduced | 
|  | 1655 | * by the same nr_pages that we used for reclaiming unmapped | 
|  | 1656 | * pages. | 
| Christoph Lameter | 2a16e3f | 2006-02-01 03:05:35 -0800 | [diff] [blame] | 1657 | * | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 1658 | * Note that shrink_slab will free memory on all zones and may | 
|  | 1659 | * take a long time. | 
| Christoph Lameter | 2a16e3f | 2006-02-01 03:05:35 -0800 | [diff] [blame] | 1660 | */ | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 1661 | while (shrink_slab(sc.nr_scanned, gfp_mask, order) && | 
| Christoph Lameter | 83e33a4 | 2006-09-25 23:31:53 -0700 | [diff] [blame] | 1662 | zone_page_state(zone, NR_SLAB_RECLAIMABLE) > | 
|  | 1663 | slab_reclaimable - nr_pages) | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 1664 | ; | 
| Christoph Lameter | 83e33a4 | 2006-09-25 23:31:53 -0700 | [diff] [blame] | 1665 |  | 
|  | 1666 | /* | 
|  | 1667 | * Update nr_reclaimed by the number of slab pages we | 
|  | 1668 | * reclaimed from this zone. | 
|  | 1669 | */ | 
|  | 1670 | nr_reclaimed += slab_reclaimable - | 
|  | 1671 | zone_page_state(zone, NR_SLAB_RECLAIMABLE); | 
| Christoph Lameter | 2a16e3f | 2006-02-01 03:05:35 -0800 | [diff] [blame] | 1672 | } | 
|  | 1673 |  | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1674 | p->reclaim_state = NULL; | 
| Christoph Lameter | d4f7796 | 2006-02-24 13:04:22 -0800 | [diff] [blame] | 1675 | current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1676 | return nr_reclaimed >= nr_pages; | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1677 | } | 
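/*
 * Illustrative sketch, not part of vmscan.c: the zone->min_unmapped_pages
 * and zone->min_slab_pages thresholds tested above are presumably derived
 * elsewhere (mm/page_alloc.c) from sysctl_min_unmapped_ratio and
 * sysctl_min_slab_ratio as a percentage of the zone's pages.  The
 * arithmetic below assumes that derivation and the default ratios of
 * 1 and 5.
 */
#include <stdio.h>

int main(void)
{
	unsigned long present_pages = 262144;	/* assumed zone size */
	int min_unmapped_ratio = 1;		/* default sysctl value */
	int min_slab_ratio = 5;			/* default sysctl value */

	/* Zone reclaim only runs while unmapped file pages or reclaimable
	 * slab pages stay above these per-zone floors. */
	printf("min_unmapped_pages = %lu\n",
	       present_pages * min_unmapped_ratio / 100);
	printf("min_slab_pages     = %lu\n",
	       present_pages * min_slab_ratio / 100);
	return 0;
}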
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1678 |  | 
|  | 1679 | int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) | 
|  | 1680 | { | 
|  | 1681 | cpumask_t mask; | 
|  | 1682 | int node_id; | 
|  | 1683 |  | 
|  | 1684 | /* | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 1685 | * Zone reclaim reclaims unmapped file backed pages and | 
|  | 1686 | * slab pages if we are over the defined limits. | 
| Christoph Lameter | 34aa133 | 2006-06-30 01:55:37 -0700 | [diff] [blame] | 1687 | * | 
| Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 1688 | * A small portion of unmapped file backed pages is needed for | 
|  | 1689 | * file I/O otherwise pages read by file I/O will be immediately | 
|  | 1690 | * thrown out if the zone is overallocated. So we do not reclaim | 
|  | 1691 | * if less than a specified percentage of the zone is used by | 
|  | 1692 | * unmapped file backed pages. | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1693 | */ | 
| Christoph Lameter | 34aa133 | 2006-06-30 01:55:37 -0700 | [diff] [blame] | 1694 | if (zone_page_state(zone, NR_FILE_PAGES) - | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 1695 | zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages | 
|  | 1696 | && zone_page_state(zone, NR_SLAB_RECLAIMABLE) | 
|  | 1697 | <= zone->min_slab_pages) | 
| Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 1698 | return 0; | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1699 |  | 
|  | 1700 | /* | 
|  | 1701 | * Avoid concurrent zone reclaims, do not reclaim in a zone that does | 
|  | 1702 | * not have reclaimable pages and if we should not delay the allocation | 
|  | 1703 | * then do not scan. | 
|  | 1704 | */ | 
|  | 1705 | if (!(gfp_mask & __GFP_WAIT) || | 
|  | 1706 | zone->all_unreclaimable || | 
|  | 1707 | atomic_read(&zone->reclaim_in_progress) > 0 || | 
|  | 1708 | (current->flags & PF_MEMALLOC)) | 
|  | 1709 | return 0; | 
|  | 1710 |  | 
|  | 1711 | /* | 
|  | 1712 | * Only run zone reclaim on the local zone or on zones that do not | 
|  | 1713 | * have associated processors. This will favor the local processor | 
|  | 1714 | * over remote processors and spread off node memory allocations | 
|  | 1715 | * as wide as possible. | 
|  | 1716 | */ | 
| Christoph Lameter | 89fa302 | 2006-09-25 23:31:55 -0700 | [diff] [blame] | 1717 | node_id = zone_to_nid(zone); | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1718 | mask = node_to_cpumask(node_id); | 
|  | 1719 | if (!cpus_empty(mask) && node_id != numa_node_id()) | 
|  | 1720 | return 0; | 
|  | 1721 | return __zone_reclaim(zone, gfp_mask, order); | 
|  | 1722 | } | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1723 | #endif |