/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
	 * In this context, it doesn't matter that we scan the
	 * whole list at once. */
	int swap_cluster_max;

	int swappiness;

	int all_unreclaimable;

	int order;

	/* Which cgroup do we reclaim from */
	struct mem_cgroup *mem_cgroup;

	/* Pluggable isolate pages callback */
	unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
			unsigned long *scanned, int order, int mode,
			struct zone *z, struct mem_cgroup *mem_cont,
			int active);
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

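/*
 * scan_global_lru() is true when reclaim is working on the global LRU lists
 * rather than on a memory cgroup's private LRU, i.e. when sc->mem_cgroup is
 * NULL.  Without CONFIG_CGROUP_MEM_RES_CTLR every scan is a global scan.
 */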
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#define scan_global_lru(sc)	(!(sc)->mem_cgroup)
#else
#define scan_global_lru(sc)	(1)
#endif

/*
 * Add a shrinker callback to be called from the vm
 */
void register_shrinker(struct shrinker *shrinker)
{
	shrinker->nr = 0;
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);

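		/*
		 * Ask the shrinker to scan a number of objects proportional
		 * to the LRU pressure just applied, roughly
		 *
		 *	delta = max_pass * (scanned / lru_pages) * (4 / seeks)
		 *
		 * Illustrative numbers only: with seeks == 2, scanned == 1024
		 * and lru_pages == 102400 (1% of the LRU scanned), a cache
		 * reporting max_pass == 50000 objects accrues roughly 1000
		 * objects' worth of pressure in shrinker->nr.
		 */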
		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "%s: nr=%ld\n",
					__func__, shrinker->nr);
			shrinker->nr = max_pass;
		}

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrink)(0, gfp_mask);
			shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}

/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}

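/*
 * A page cache page counts as "freeable" here when only two references
 * remain: the page cache itself and the caller that isolated the page from
 * the LRU.  Buffer heads attached via page->private pin one extra reference,
 * which the !!PagePrivate(page) term discounts.
 */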
static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* Request for sync pageout. */
enum pageout_io {
	PAGEOUT_IO_ASYNC,
	PAGEOUT_IO_SYNC,
};

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
						enum pageout_io sync_writeback)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in generic_file_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling. This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 * See swapfile.c:page_queue_congested().
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (PagePrivate(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.nonblocking = 1,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		/*
		 * Wait on writeback if requested to. This happens when
		 * direct reclaiming a large contiguous area and the
		 * first attempt to free a range of pages fails.
		 */
		if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
			wait_on_page_writeback(page);

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	write_lock_irq(&mapping->tree_lock);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_count.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under tree_lock, then this ordering is not required.
	 */
	if (unlikely(page_count(page) != 2))
		goto cannot_free;
	smp_rmb();
	if (unlikely(PageDirty(page)))
		goto cannot_free;

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		write_unlock_irq(&mapping->tree_lock);
		swap_free(swap);
		__put_page(page);	/* The pagecache ref */
		return 1;
	}

	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	__put_page(page);
	return 1;

cannot_free:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
					struct scan_control *sc,
					enum pageout_io sync_writeback)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	unsigned long nr_reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;

		VM_BUG_ON(PageActive(page));

		sc->nr_scanned++;

		if (!sc->may_swap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			/*
			 * Synchronous reclaim is performed in two passes,
			 * first an asynchronous pass over the list to
			 * start parallel writeback, and a second synchronous
			 * pass to wait for the IO to complete.  Wait here
			 * for any page for which writeback has already
			 * started.
			 */
			if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
				wait_on_page_writeback(page);
			else
				goto keep_locked;
		}

		referenced = page_referenced(page, 1, sc->mem_cgroup);
		/* In active use or really unfreeable?  Activate it. */
		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
					referenced && page_mapping_inuse(page))
			goto activate_locked;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page))
			if (!add_to_swap(page, GFP_ATOMIC))
				goto activate_locked;
#endif /* CONFIG_SWAP */

		mapping = page_mapping(page);

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, 0)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sync_writeback)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (TestSetPageLocked(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!mapping || !remove_mapping(mapping, page))
			goto keep_locked;

free_it:
		unlock_page(page);
		nr_reclaimed++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	count_vm_events(PGACTIVATE, pgactivate);
	return nr_reclaimed;
}

/* LRU Isolation modes. */
#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, int mode)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/*
	 * When checking the active state, we need to be sure we are
	 * dealing with comparable boolean values.  Take the logical not
	 * of each.
	 */
	if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
		return ret;

	ret = -EBUSY;
	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 * @order:	The caller's attempted allocation order
 * @mode:	One of the LRU isolation modes
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct list_head *src, struct list_head *dst,
		unsigned long *scanned, int order, int mode)
{
	unsigned long nr_taken = 0;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct page *page;
		unsigned long pfn;
		unsigned long end_pfn;
		unsigned long page_pfn;
		int zone_id;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		switch (__isolate_lru_page(page, mode)) {
		case 0:
			list_move(&page->lru, dst);
			nr_taken++;
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			continue;

		default:
			BUG();
		}

		if (!order)
			continue;

		/*
		 * Attempt to take all pages in the order aligned region
		 * surrounding the tag page.  Only take those pages of
		 * the same active state as that tag page.  We may safely
		 * round the target page pfn down to the requested order
		 * as the mem_map is guaranteed valid out to MAX_ORDER;
		 * where that page is in a different zone we will detect
		 * it from its zone id and abort this block scan.
		 */
		zone_id = page_zone_id(page);
		page_pfn = page_to_pfn(page);
		pfn = page_pfn & ~((1 << order) - 1);
		end_pfn = pfn + (1 << order);
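		/*
		 * Example: with order == 2 and a tag page at pfn 1030, pfn is
		 * rounded down to 1028 and the loop below walks pfns
		 * 1028..1031, skipping 1030 (the tag page taken above).
		 */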
		for (; pfn < end_pfn; pfn++) {
			struct page *cursor_page;

			/* The target page is in the block, ignore it. */
			if (unlikely(pfn == page_pfn))
				continue;

			/* Avoid holes within the zone. */
			if (unlikely(!pfn_valid_within(pfn)))
				break;

			cursor_page = pfn_to_page(pfn);
			/* Check that we have not crossed a zone boundary. */
			if (unlikely(page_zone_id(cursor_page) != zone_id))
				continue;
			switch (__isolate_lru_page(cursor_page, mode)) {
			case 0:
				list_move(&cursor_page->lru, dst);
				nr_taken++;
				scan++;
				break;

			case -EBUSY:
				/* else it is being freed elsewhere */
				list_move(&cursor_page->lru, src);
			default:
				break;
			}
		}
	}

	*scanned = scan;
	return nr_taken;
}

static unsigned long isolate_pages_global(unsigned long nr,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	if (active)
		return isolate_lru_pages(nr, &z->active_list, dst,
						scanned, order, mode);
	else
		return isolate_lru_pages(nr, &z->inactive_list, dst,
						scanned, order, mode);
}

/*
 * clear_active_flags() is a helper for shrink_inactive_list(), clearing
 * any active bits from the pages in the list.
 */
static unsigned long clear_active_flags(struct list_head *page_list)
{
	int nr_active = 0;
	struct page *page;

	list_for_each_entry(page, page_list, lru)
		if (PageActive(page)) {
			ClearPageActive(page);
			nr_active++;
		}

	return nr_active;
}

/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */
static unsigned long shrink_inactive_list(unsigned long max_scan,
				struct zone *zone, struct scan_control *sc)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	unsigned long nr_scanned = 0;
	unsigned long nr_reclaimed = 0;

	pagevec_init(&pvec, 1);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	do {
		struct page *page;
		unsigned long nr_taken;
		unsigned long nr_scan;
		unsigned long nr_freed;
		unsigned long nr_active;

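		/*
		 * For allocations above PAGE_ALLOC_COSTLY_ORDER, isolate
		 * pages regardless of their active state so that whole
		 * order-sized blocks of contiguous pages can be reclaimed
		 * (lumpy reclaim); otherwise only inactive pages are taken.
		 */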
		nr_taken = sc->isolate_pages(sc->swap_cluster_max,
			     &page_list, &nr_scan, sc->order,
			     (sc->order > PAGE_ALLOC_COSTLY_ORDER)?
					     ISOLATE_BOTH : ISOLATE_INACTIVE,
				zone, sc->mem_cgroup, 0);
		nr_active = clear_active_flags(&page_list);
		__count_vm_events(PGDEACTIVATE, nr_active);

		__mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
		__mod_zone_page_state(zone, NR_INACTIVE,
						-(nr_taken - nr_active));
		if (scan_global_lru(sc))
			zone->pages_scanned += nr_scan;
		spin_unlock_irq(&zone->lru_lock);

		nr_scanned += nr_scan;
		nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);

		/*
		 * If we are direct reclaiming for contiguous pages and we do
		 * not reclaim everything in the list, try again and wait
		 * for IO to complete. This will stall high-order allocations
		 * but that should be acceptable to the caller
		 */
		if (nr_freed < nr_taken && !current_is_kswapd() &&
					sc->order > PAGE_ALLOC_COSTLY_ORDER) {
			congestion_wait(WRITE, HZ/10);

			/*
			 * The attempt at page out may have made some
			 * of the pages active, mark them inactive again.
			 */
			nr_active = clear_active_flags(&page_list);
			count_vm_events(PGDEACTIVATE, nr_active);

			nr_freed += shrink_page_list(&page_list, sc,
							PAGEOUT_IO_SYNC);
		}

		nr_reclaimed += nr_freed;
		local_irq_disable();
		if (current_is_kswapd()) {
			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
			__count_vm_events(KSWAPD_STEAL, nr_freed);
		} else if (scan_global_lru(sc))
			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);

		__count_zone_vm_events(PGSTEAL, zone, nr_freed);

		if (nr_taken == 0)
			goto done;

		spin_lock(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = lru_to_page(&page_list);
			VM_BUG_ON(PageLRU(page));
			SetPageLRU(page);
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	} while (nr_scanned < max_scan);
	spin_unlock(&zone->lru_lock);
done:
	local_irq_enable();
	pagevec_release(&pvec);
	return nr_reclaimed;
}

/*
 * We are about to scan this zone at a certain priority level.  If that priority
 * level is smaller (ie: more urgent) than the previous priority, then note
 * that priority level within the zone.  This is done so that when the next
 * process comes in to scan this zone, it will immediately start out at this
 * priority level rather than having to build up its own scanning priority.
 * Here, this priority affects only the reclaim-mapped threshold.
 */
static inline void note_zone_scanning_priority(struct zone *zone, int priority)
{
	if (priority < zone->prev_priority)
		zone->prev_priority = priority;
}

static inline int zone_is_near_oom(struct zone *zone)
{
	return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
				+ zone_page_state(zone, NR_INACTIVE))*3;
}

/*
 * Determine whether we should try to reclaim mapped pages.
 * Both the global LRU and memory-cgroup reclaim paths end up here; the
 * scan_global_lru() checks below pick the right statistics for each case.
 */
static int calc_reclaim_mapped(struct scan_control *sc, struct zone *zone,
				int priority)
{
	long mapped_ratio;
	long distress;
	long swap_tendency;
	long imbalance;
	int reclaim_mapped = 0;
	int prev_priority;

	if (scan_global_lru(sc) && zone_is_near_oom(zone))
		return 1;
	/*
	 * `distress' is a measure of how much trouble we're having
	 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
	 */
	if (scan_global_lru(sc))
		prev_priority = zone->prev_priority;
	else
		prev_priority = mem_cgroup_get_reclaim_priority(sc->mem_cgroup);

	distress = 100 >> min(prev_priority, priority);

	/*
	 * The point of this algorithm is to decide when to start
	 * reclaiming mapped memory instead of just pagecache.  Work out
	 * how much memory is mapped.
	 */
	if (scan_global_lru(sc))
		mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
				global_page_state(NR_ANON_PAGES)) * 100) /
					vm_total_pages;
	else
		mapped_ratio = mem_cgroup_calc_mapped_ratio(sc->mem_cgroup);

	/*
	 * Now decide how much we really want to unmap some pages.  The
	 * mapped ratio is downgraded - just because there's a lot of
	 * mapped memory doesn't necessarily mean that page reclaim
	 * isn't succeeding.
	 *
	 * The distress ratio is important - we don't want to start
	 * going oom.
	 *
	 * A 100% value of vm_swappiness overrides this algorithm
	 * altogether.
	 */
	swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
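	/*
	 * Illustrative numbers only: with 40% of memory mapped, no reclaim
	 * distress (prev_priority and priority both at their default, so
	 * distress == 0) and the default swappiness of 60, swap_tendency
	 * starts at 40/2 + 0 + 60 = 80, i.e. below the threshold of 100
	 * tested below until distress or the imbalance feedback lifts it.
	 */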

	/*
	 * If there's a huge imbalance between active and inactive
	 * (think active 100 times larger than inactive) we should
	 * become more permissive, or the system will take too much
	 * cpu before it starts swapping during memory pressure.
	 * Distress is about avoiding early-oom, this is about
	 * making swappiness graceful despite setting it to low
	 * values.
	 *
	 * Avoid div by zero with nr_inactive+1, and max resulting
	 * value is vm_total_pages.
	 */
	if (scan_global_lru(sc)) {
		imbalance  = zone_page_state(zone, NR_ACTIVE);
		imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
	} else
		imbalance = mem_cgroup_reclaim_imbalance(sc->mem_cgroup);

	/*
	 * Reduce the effect of imbalance if swappiness is low:
	 * for a very low swappiness, the imbalance must be much
	 * higher than 100 for this logic to make a difference.
	 *
	 * Max temporary value is vm_total_pages*100.
	 */
	imbalance *= (vm_swappiness + 1);
	imbalance /= 100;

	/*
	 * If not much of the RAM is mapped, make the imbalance
	 * less relevant: refilling the inactive list with mapped
	 * pages is only a high priority when a high ratio of the
	 * RAM is mapped.
	 *
	 * Max temporary value is vm_total_pages*100.
	 */
	imbalance *= mapped_ratio;
	imbalance /= 100;

	/* apply imbalance feedback to swap_tendency */
	swap_tendency += imbalance;

	/*
	 * Now use this metric to decide whether to start moving mapped
	 * memory onto the inactive list.
	 */
	if (swap_tendency >= 100)
		reclaim_mapped = 1;

	return reclaim_mapped;
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
| KAMEZAWA Hiroyuki | 1cfb419 | 2008-02-07 00:14:37 -0800 | [diff] [blame] | 1058 |  | 
 | 1059 |  | 
| Andrew Morton | 1742f19 | 2006-03-22 00:08:21 -0800 | [diff] [blame] | 1060 | static void shrink_active_list(unsigned long nr_pages, struct zone *zone, | 
| Martin Bligh | bbdb396 | 2006-10-28 10:38:25 -0700 | [diff] [blame] | 1061 | 				struct scan_control *sc, int priority) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1062 | { | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1063 | 	unsigned long pgmoved; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 | 	int pgdeactivate = 0; | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1065 | 	unsigned long pgscanned; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1066 | 	LIST_HEAD(l_hold);	/* The pages which were snipped off */ | 
 | 1067 | 	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */ | 
 | 1068 | 	LIST_HEAD(l_active);	/* Pages to go onto the active_list */ | 
 | 1069 | 	struct page *page; | 
 | 1070 | 	struct pagevec pvec; | 
 | 1071 | 	int reclaim_mapped = 0; | 
| Christoph Lameter | 2903fb1 | 2006-02-11 17:55:55 -0800 | [diff] [blame] | 1072 |  | 
| KAMEZAWA Hiroyuki | 1cfb419 | 2008-02-07 00:14:37 -0800 | [diff] [blame] | 1073 | 	if (sc->may_swap) | 
 | 1074 | 		reclaim_mapped = calc_reclaim_mapped(sc, zone, priority); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1075 |  | 
 | 1076 | 	lru_add_drain(); | 
 | 1077 | 	spin_lock_irq(&zone->lru_lock); | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1078 | 	pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order, | 
 | 1079 | 					ISOLATE_ACTIVE, zone, | 
 | 1080 | 					sc->mem_cgroup, 1); | 
| KAMEZAWA Hiroyuki | 1cfb419 | 2008-02-07 00:14:37 -0800 | [diff] [blame] | 1081 | 	/* | 
 | 1082 | 	 * zone->pages_scanned is used to detect a zone's oom state; | 
 | 1083 | 	 * the mem_cgroup remembers nr_scan by itself. | 
 | 1084 | 	 */ | 
 | 1085 | 	if (scan_global_lru(sc)) | 
 | 1086 | 		zone->pages_scanned += pgscanned; | 
 | 1087 |  | 
| Christoph Lameter | c878538 | 2007-02-10 01:43:01 -0800 | [diff] [blame] | 1088 | 	__mod_zone_page_state(zone, NR_ACTIVE, -pgmoved); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1089 | 	spin_unlock_irq(&zone->lru_lock); | 
 | 1090 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1091 | 	while (!list_empty(&l_hold)) { | 
 | 1092 | 		cond_resched(); | 
 | 1093 | 		page = lru_to_page(&l_hold); | 
 | 1094 | 		list_del(&page->lru); | 
 | 1095 | 		if (page_mapped(page)) { | 
 | 1096 | 			if (!reclaim_mapped || | 
 | 1097 | 			    (total_swap_pages == 0 && PageAnon(page)) || | 
| Balbir Singh | bed7161 | 2008-02-07 00:14:01 -0800 | [diff] [blame] | 1098 | 			    page_referenced(page, 0, sc->mem_cgroup)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1099 | 				list_add(&page->lru, &l_active); | 
 | 1100 | 				continue; | 
 | 1101 | 			} | 
 | 1102 | 		} | 
 | 1103 | 		list_add(&page->lru, &l_inactive); | 
 | 1104 | 	} | 
 | 1105 |  | 
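 |  | 	/* | 
 |  | 	 * The two loops below put the sorted pages back on the zone lists in | 
 |  | 	 * pagevec-sized batches: zone->lru_lock is dropped each time a full | 
 |  | 	 * pagevec is released, so the lock is never held across the | 
 |  | 	 * potentially expensive page release work. | 
 |  | 	 */ | 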
 | 1106 | 	pagevec_init(&pvec, 1); | 
 | 1107 | 	pgmoved = 0; | 
 | 1108 | 	spin_lock_irq(&zone->lru_lock); | 
 | 1109 | 	while (!list_empty(&l_inactive)) { | 
 | 1110 | 		page = lru_to_page(&l_inactive); | 
 | 1111 | 		prefetchw_prev_lru_page(page, &l_inactive, flags); | 
| Nick Piggin | 725d704 | 2006-09-25 23:30:55 -0700 | [diff] [blame] | 1112 | 		VM_BUG_ON(PageLRU(page)); | 
| Nick Piggin | 8d438f9 | 2006-03-22 00:07:59 -0800 | [diff] [blame] | 1113 | 		SetPageLRU(page); | 
| Nick Piggin | 725d704 | 2006-09-25 23:30:55 -0700 | [diff] [blame] | 1114 | 		VM_BUG_ON(!PageActive(page)); | 
| Nick Piggin | 4c84cac | 2006-03-22 00:08:00 -0800 | [diff] [blame] | 1115 | 		ClearPageActive(page); | 
 | 1116 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1117 | 		list_move(&page->lru, &zone->inactive_list); | 
| Hugh Dickins | 427d541 | 2008-03-04 14:29:03 -0800 | [diff] [blame] | 1118 | 		mem_cgroup_move_lists(page, false); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1119 | 		pgmoved++; | 
 | 1120 | 		if (!pagevec_add(&pvec, page)) { | 
| Christoph Lameter | c878538 | 2007-02-10 01:43:01 -0800 | [diff] [blame] | 1121 | 			__mod_zone_page_state(zone, NR_INACTIVE, pgmoved); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1122 | 			spin_unlock_irq(&zone->lru_lock); | 
 | 1123 | 			pgdeactivate += pgmoved; | 
 | 1124 | 			pgmoved = 0; | 
 | 1125 | 			if (buffer_heads_over_limit) | 
 | 1126 | 				pagevec_strip(&pvec); | 
 | 1127 | 			__pagevec_release(&pvec); | 
 | 1128 | 			spin_lock_irq(&zone->lru_lock); | 
 | 1129 | 		} | 
 | 1130 | 	} | 
| Christoph Lameter | c878538 | 2007-02-10 01:43:01 -0800 | [diff] [blame] | 1131 | 	__mod_zone_page_state(zone, NR_INACTIVE, pgmoved); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1132 | 	pgdeactivate += pgmoved; | 
 | 1133 | 	if (buffer_heads_over_limit) { | 
 | 1134 | 		spin_unlock_irq(&zone->lru_lock); | 
 | 1135 | 		pagevec_strip(&pvec); | 
 | 1136 | 		spin_lock_irq(&zone->lru_lock); | 
 | 1137 | 	} | 
 | 1138 |  | 
 | 1139 | 	pgmoved = 0; | 
 | 1140 | 	while (!list_empty(&l_active)) { | 
 | 1141 | 		page = lru_to_page(&l_active); | 
 | 1142 | 		prefetchw_prev_lru_page(page, &l_active, flags); | 
| Nick Piggin | 725d704 | 2006-09-25 23:30:55 -0700 | [diff] [blame] | 1143 | 		VM_BUG_ON(PageLRU(page)); | 
| Nick Piggin | 8d438f9 | 2006-03-22 00:07:59 -0800 | [diff] [blame] | 1144 | 		SetPageLRU(page); | 
| Nick Piggin | 725d704 | 2006-09-25 23:30:55 -0700 | [diff] [blame] | 1145 | 		VM_BUG_ON(!PageActive(page)); | 
| Hugh Dickins | 427d541 | 2008-03-04 14:29:03 -0800 | [diff] [blame] | 1146 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1147 | 		list_move(&page->lru, &zone->active_list); | 
| Hugh Dickins | 427d541 | 2008-03-04 14:29:03 -0800 | [diff] [blame] | 1148 | 		mem_cgroup_move_lists(page, true); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 | 		pgmoved++; | 
 | 1150 | 		if (!pagevec_add(&pvec, page)) { | 
| Christoph Lameter | c878538 | 2007-02-10 01:43:01 -0800 | [diff] [blame] | 1151 | 			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1152 | 			pgmoved = 0; | 
 | 1153 | 			spin_unlock_irq(&zone->lru_lock); | 
 | 1154 | 			__pagevec_release(&pvec); | 
 | 1155 | 			spin_lock_irq(&zone->lru_lock); | 
 | 1156 | 		} | 
 | 1157 | 	} | 
| Christoph Lameter | c878538 | 2007-02-10 01:43:01 -0800 | [diff] [blame] | 1158 | 	__mod_zone_page_state(zone, NR_ACTIVE, pgmoved); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1159 |  | 
| Christoph Lameter | f8891e5 | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 1160 | 	__count_zone_vm_events(PGREFILL, zone, pgscanned); | 
 | 1161 | 	__count_vm_events(PGDEACTIVATE, pgdeactivate); | 
 | 1162 | 	spin_unlock_irq(&zone->lru_lock); | 
| Nick Piggin | a74609f | 2006-01-06 00:11:20 -0800 | [diff] [blame] | 1163 |  | 
 | 1164 | 	pagevec_release(&pvec); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1165 | } | 
 | 1166 |  | 
 | 1167 | /* | 
 | 1168 |  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim. | 
 | 1169 |  */ | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1170 | static unsigned long shrink_zone(int priority, struct zone *zone, | 
 | 1171 | 				struct scan_control *sc) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1172 | { | 
 | 1173 | 	unsigned long nr_active; | 
 | 1174 | 	unsigned long nr_inactive; | 
| Christoph Lameter | 8695949 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1175 | 	unsigned long nr_to_scan; | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1176 | 	unsigned long nr_reclaimed = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1177 |  | 
| KAMEZAWA Hiroyuki | 1cfb419 | 2008-02-07 00:14:37 -0800 | [diff] [blame] | 1178 | 	if (scan_global_lru(sc)) { | 
 | 1179 | 		/* | 
 | 1180 | 		 * Add one to nr_to_scan just to make sure that the kernel | 
 | 1181 | 		 * will slowly sift through the active list. | 
 | 1182 | 		 */ | 
 | 1183 | 		zone->nr_scan_active += | 
 | 1184 | 			(zone_page_state(zone, NR_ACTIVE) >> priority) + 1; | 
 | 1185 | 		nr_active = zone->nr_scan_active; | 
 | 1186 | 		zone->nr_scan_inactive += | 
 | 1187 | 			(zone_page_state(zone, NR_INACTIVE) >> priority) + 1; | 
 | 1188 | 		nr_inactive = zone->nr_scan_inactive; | 
 | 1189 | 		if (nr_inactive >= sc->swap_cluster_max) | 
 | 1190 | 			zone->nr_scan_inactive = 0; | 
 | 1191 | 		else | 
 | 1192 | 			nr_inactive = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1193 |  | 
| KAMEZAWA Hiroyuki | 1cfb419 | 2008-02-07 00:14:37 -0800 | [diff] [blame] | 1194 | 		if (nr_active >= sc->swap_cluster_max) | 
 | 1195 | 			zone->nr_scan_active = 0; | 
 | 1196 | 		else | 
 | 1197 | 			nr_active = 0; | 
 | 1198 | 	} else { | 
 | 1199 | 		/* | 
 | 1200 | 		 * This reclaim occurs not because of a zone memory shortage | 
 | 1201 | 		 * but because the memory controller has hit its limit, so | 
 | 1202 | 		 * don't modify the zone's reclaim-related data. | 
 | 1203 | 		 */ | 
 | 1204 | 		nr_active = mem_cgroup_calc_reclaim_active(sc->mem_cgroup, | 
 | 1205 | 					zone, priority); | 
 | 1206 |  | 
 | 1207 | 		nr_inactive = mem_cgroup_calc_reclaim_inactive(sc->mem_cgroup, | 
 | 1208 | 					zone, priority); | 
 | 1209 | 	} | 
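 |  | 	/* | 
 |  | 	 * Worked example (illustrative numbers): with 1,000 inactive pages | 
 |  | 	 * and priority 12, each call adds (1000 >> 12) + 1 = 1 to | 
 |  | 	 * zone->nr_scan_inactive; only once the accumulated count reaches | 
 |  | 	 * sc->swap_cluster_max (normally SWAP_CLUSTER_MAX, i.e. 32) does the | 
 |  | 	 * batch below actually get scanned. | 
 |  | 	 */ | 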
 | 1210 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1211 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1212 | 	while (nr_active || nr_inactive) { | 
 | 1213 | 		if (nr_active) { | 
| Christoph Lameter | 8695949 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1214 | 			nr_to_scan = min(nr_active, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1215 | 					(unsigned long)sc->swap_cluster_max); | 
| Christoph Lameter | 8695949 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1216 | 			nr_active -= nr_to_scan; | 
| Martin Bligh | bbdb396 | 2006-10-28 10:38:25 -0700 | [diff] [blame] | 1217 | 			shrink_active_list(nr_to_scan, zone, sc, priority); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1218 | 		} | 
 | 1219 |  | 
 | 1220 | 		if (nr_inactive) { | 
| Christoph Lameter | 8695949 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1221 | 			nr_to_scan = min(nr_inactive, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1222 | 					(unsigned long)sc->swap_cluster_max); | 
| Christoph Lameter | 8695949 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1223 | 			nr_inactive -= nr_to_scan; | 
| Andrew Morton | 1742f19 | 2006-03-22 00:08:21 -0800 | [diff] [blame] | 1224 | 			nr_reclaimed += shrink_inactive_list(nr_to_scan, zone, | 
 | 1225 | 								sc); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1226 | 		} | 
 | 1227 | 	} | 
 | 1228 |  | 
| Andrew Morton | 232ea4d | 2007-02-28 20:13:21 -0800 | [diff] [blame] | 1229 | 	throttle_vm_writeout(sc->gfp_mask); | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1230 | 	return nr_reclaimed; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1231 | } | 
 | 1232 |  | 
 | 1233 | /* | 
 | 1234 |  * This is the direct reclaim path, for page-allocating processes.  We only | 
 | 1235 |  * try to reclaim pages from zones which will satisfy the caller's allocation | 
 | 1236 |  * request. | 
 | 1237 |  * | 
 | 1238 |  * We reclaim from a zone even if that zone is over pages_high.  Because: | 
 | 1239 |  * a) The caller may be trying to free *extra* pages to satisfy a higher-order | 
 | 1240 |  *    allocation or | 
 | 1241 |  * b) The zones may be over pages_high but they must go *over* pages_high to | 
 | 1242 |  *    satisfy the `incremental min' zone defense algorithm. | 
 | 1243 |  * | 
 | 1244 |  * Returns the number of reclaimed pages. | 
 | 1245 |  * | 
 | 1246 |  * If a zone is deemed to be full of pinned pages then just give it a light | 
 | 1247 |  * scan then give up on it. | 
 | 1248 |  */ | 
| Mel Gorman | dac1d27 | 2008-04-28 02:12:12 -0700 | [diff] [blame] | 1249 | static unsigned long shrink_zones(int priority, struct zonelist *zonelist, | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1250 | 					struct scan_control *sc) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1251 | { | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 1252 | 	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1253 | 	unsigned long nr_reclaimed = 0; | 
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 1254 | 	struct zoneref *z; | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 1255 | 	struct zone *zone; | 
| KAMEZAWA Hiroyuki | 1cfb419 | 2008-02-07 00:14:37 -0800 | [diff] [blame] | 1256 |  | 
| Nick Piggin | 408d854 | 2006-09-25 23:31:27 -0700 | [diff] [blame] | 1257 | 	sc->all_unreclaimable = 1; | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 1258 | 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { | 
| Con Kolivas | f3fe651 | 2006-01-06 00:11:15 -0800 | [diff] [blame] | 1259 | 		if (!populated_zone(zone)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1260 | 			continue; | 
| KAMEZAWA Hiroyuki | 1cfb419 | 2008-02-07 00:14:37 -0800 | [diff] [blame] | 1261 | 		/* | 
 | 1262 | 		 * Take care that memory-controller reclaim has only a small | 
 | 1263 | 		 * influence on the global LRU. | 
 | 1264 | 		 */ | 
 | 1265 | 		if (scan_global_lru(sc)) { | 
 | 1266 | 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) | 
 | 1267 | 				continue; | 
 | 1268 | 			note_zone_scanning_priority(zone, priority); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 |  | 
| KAMEZAWA Hiroyuki | 1cfb419 | 2008-02-07 00:14:37 -0800 | [diff] [blame] | 1270 | 			if (zone_is_all_unreclaimable(zone) && | 
 | 1271 | 						priority != DEF_PRIORITY) | 
 | 1272 | 				continue;	/* Let kswapd poll it */ | 
 | 1273 | 			sc->all_unreclaimable = 0; | 
 | 1274 | 		} else { | 
 | 1275 | 			/* | 
 | 1276 | 			 * Ignore cpuset limitations here. We just want to reduce | 
 | 1277 | 			 * the number of pages this cgroup uses, regardless of memory shortage. | 
 | 1278 | 			 */ | 
 | 1279 | 			sc->all_unreclaimable = 0; | 
 | 1280 | 			mem_cgroup_note_reclaim_priority(sc->mem_cgroup, | 
 | 1281 | 							priority); | 
 | 1282 | 		} | 
| Nick Piggin | 408d854 | 2006-09-25 23:31:27 -0700 | [diff] [blame] | 1283 |  | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1284 | 		nr_reclaimed += shrink_zone(priority, zone, sc); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1285 | 	} | 
| KAMEZAWA Hiroyuki | 1cfb419 | 2008-02-07 00:14:37 -0800 | [diff] [blame] | 1286 |  | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1287 | 	return nr_reclaimed; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1288 | } | 
 | 1289 |   | 
 | 1290 | /* | 
 | 1291 |  * This is the main entry point to direct page reclaim. | 
 | 1292 |  * | 
 | 1293 |  * If a full scan of the inactive list fails to free enough memory then we | 
 | 1294 |  * are "out of memory" and something needs to be killed. | 
 | 1295 |  * | 
 | 1296 |  * If the caller is !__GFP_FS then the probability of a failure is reasonably | 
 | 1297 |  * high - the zone may be full of dirty or under-writeback pages, which this | 
 | 1298 |  * caller can't do much about.  We kick pdflush and take explicit naps in the | 
 | 1299 |  * hope that some of these pages can be written.  But if the allocating task | 
 | 1300 |  * holds filesystem locks which prevent writeout this might not work, and the | 
 | 1301 |  * allocation attempt will fail. | 
| Nishanth Aravamudan | a41f24e | 2008-04-29 00:58:25 -0700 | [diff] [blame] | 1302 |  * | 
 | 1303 |  * returns:	0, if no pages reclaimed | 
 | 1304 |  * 		else, the number of pages reclaimed | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1305 |  */ | 
| Mel Gorman | dac1d27 | 2008-04-28 02:12:12 -0700 | [diff] [blame] | 1306 | static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | 
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 1307 | 					struct scan_control *sc) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 | { | 
 | 1309 | 	int priority; | 
 | 1310 | 	int ret = 0; | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1311 | 	unsigned long total_scanned = 0; | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1312 | 	unsigned long nr_reclaimed = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1313 | 	struct reclaim_state *reclaim_state = current->reclaim_state; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1314 | 	unsigned long lru_pages = 0; | 
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 1315 | 	struct zoneref *z; | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 1316 | 	struct zone *zone; | 
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 1317 | 	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1318 |  | 
| KAMEZAWA Hiroyuki | 1cfb419 | 2008-02-07 00:14:37 -0800 | [diff] [blame] | 1319 | 	if (scan_global_lru(sc)) | 
 | 1320 | 		count_vm_event(ALLOCSTALL); | 
 | 1321 | 	/* | 
 | 1322 | 	 * mem_cgroup will not do shrink_slab. | 
 | 1323 | 	 */ | 
 | 1324 | 	if (scan_global_lru(sc)) { | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 1325 | 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1326 |  | 
| KAMEZAWA Hiroyuki | 1cfb419 | 2008-02-07 00:14:37 -0800 | [diff] [blame] | 1327 | 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) | 
 | 1328 | 				continue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1329 |  | 
| KAMEZAWA Hiroyuki | 1cfb419 | 2008-02-07 00:14:37 -0800 | [diff] [blame] | 1330 | 			lru_pages += zone_page_state(zone, NR_ACTIVE) | 
 | 1331 | 					+ zone_page_state(zone, NR_INACTIVE); | 
 | 1332 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1333 | 	} | 
 | 1334 |  | 
 | 1335 | 	for (priority = DEF_PRIORITY; priority >= 0; priority--) { | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1336 | 		sc->nr_scanned = 0; | 
| Rik van Riel | f7b7fd8 | 2005-11-28 13:44:07 -0800 | [diff] [blame] | 1337 | 		if (!priority) | 
 | 1338 | 			disable_swap_token(); | 
| Mel Gorman | dac1d27 | 2008-04-28 02:12:12 -0700 | [diff] [blame] | 1339 | 		nr_reclaimed += shrink_zones(priority, zonelist, sc); | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1340 | 		/* | 
 | 1341 | 		 * Don't shrink slabs when reclaiming memory from | 
 | 1342 | 		 * over-limit cgroups. | 
 | 1343 | 		 */ | 
| KAMEZAWA Hiroyuki | 91a4547 | 2008-02-07 00:14:29 -0800 | [diff] [blame] | 1344 | 		if (scan_global_lru(sc)) { | 
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 1345 | 			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages); | 
| KAMEZAWA Hiroyuki | 91a4547 | 2008-02-07 00:14:29 -0800 | [diff] [blame] | 1346 | 			if (reclaim_state) { | 
 | 1347 | 				nr_reclaimed += reclaim_state->reclaimed_slab; | 
 | 1348 | 				reclaim_state->reclaimed_slab = 0; | 
 | 1349 | 			} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1350 | 		} | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1351 | 		total_scanned += sc->nr_scanned; | 
 | 1352 | 		if (nr_reclaimed >= sc->swap_cluster_max) { | 
| Nishanth Aravamudan | a41f24e | 2008-04-29 00:58:25 -0700 | [diff] [blame] | 1353 | 			ret = nr_reclaimed; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1354 | 			goto out; | 
 | 1355 | 		} | 
 | 1356 |  | 
 | 1357 | 		/* | 
 | 1358 | 		 * Try to write back as many pages as we just scanned.  This | 
 | 1359 | 		 * tends to cause slow streaming writers to write data to the | 
 | 1360 | 		 * disk smoothly, at the dirtying rate, which is nice.   But | 
 | 1361 | 		 * that's undesirable in laptop mode, where we *want* lumpy | 
 | 1362 | 		 * writeout.  So in laptop mode, write out the whole world. | 
 | 1363 | 		 */ | 
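 |  | 		/* | 
 |  | 		 * With the default swap_cluster_max (SWAP_CLUSTER_MAX, i.e. 32) | 
 |  | 		 * this amounts to kicking pdflush once roughly 48 pages have | 
 |  | 		 * been scanned during this reclaim run. | 
 |  | 		 */ | 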
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1364 | 		if (total_scanned > sc->swap_cluster_max + | 
 | 1365 | 					sc->swap_cluster_max / 2) { | 
| Pekka J Enberg | 687a21c | 2005-06-28 20:44:55 -0700 | [diff] [blame] | 1366 | 			wakeup_pdflush(laptop_mode ? 0 : total_scanned); | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1367 | 			sc->may_writepage = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1368 | 		} | 
 | 1369 |  | 
 | 1370 | 		/* Take a nap, wait for some writeback to complete */ | 
| Andrew Morton | 4dd4b92 | 2008-03-24 12:29:52 -0700 | [diff] [blame] | 1371 | 		if (sc->nr_scanned && priority < DEF_PRIORITY - 2) | 
| Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1372 | 			congestion_wait(WRITE, HZ/10); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1373 | 	} | 
| Nick Piggin | 408d854 | 2006-09-25 23:31:27 -0700 | [diff] [blame] | 1374 | 	/* top priority shrink_caches still had more to do? don't OOM, then */ | 
| KAMEZAWA Hiroyuki | 91a4547 | 2008-02-07 00:14:29 -0800 | [diff] [blame] | 1375 | 	if (!sc->all_unreclaimable && scan_global_lru(sc)) | 
| Nishanth Aravamudan | a41f24e | 2008-04-29 00:58:25 -0700 | [diff] [blame] | 1376 | 		ret = nr_reclaimed; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 | out: | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1378 | 	/* | 
 | 1379 | 	 * Now that we've scanned all the zones at this priority level, note | 
 | 1380 | 	 * that level within the zone so that the next thread which performs | 
 | 1381 | 	 * scanning of this zone will immediately start out at this priority | 
 | 1382 | 	 * level.  This affects only the decision whether or not to bring | 
 | 1383 | 	 * mapped pages onto the inactive list. | 
 | 1384 | 	 */ | 
 | 1385 | 	if (priority < 0) | 
 | 1386 | 		priority = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1387 |  | 
| KAMEZAWA Hiroyuki | 1cfb419 | 2008-02-07 00:14:37 -0800 | [diff] [blame] | 1388 | 	if (scan_global_lru(sc)) { | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 1389 | 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1390 |  | 
| KAMEZAWA Hiroyuki | 1cfb419 | 2008-02-07 00:14:37 -0800 | [diff] [blame] | 1391 | 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) | 
 | 1392 | 				continue; | 
 | 1393 |  | 
 | 1394 | 			zone->prev_priority = priority; | 
 | 1395 | 		} | 
 | 1396 | 	} else | 
 | 1397 | 		mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority); | 
 | 1398 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1399 | 	return ret; | 
 | 1400 | } | 
 | 1401 |  | 
| Mel Gorman | dac1d27 | 2008-04-28 02:12:12 -0700 | [diff] [blame] | 1402 | unsigned long try_to_free_pages(struct zonelist *zonelist, int order, | 
 | 1403 | 								gfp_t gfp_mask) | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1404 | { | 
 | 1405 | 	struct scan_control sc = { | 
 | 1406 | 		.gfp_mask = gfp_mask, | 
 | 1407 | 		.may_writepage = !laptop_mode, | 
 | 1408 | 		.swap_cluster_max = SWAP_CLUSTER_MAX, | 
 | 1409 | 		.may_swap = 1, | 
 | 1410 | 		.swappiness = vm_swappiness, | 
 | 1411 | 		.order = order, | 
 | 1412 | 		.mem_cgroup = NULL, | 
 | 1413 | 		.isolate_pages = isolate_pages_global, | 
 | 1414 | 	}; | 
 | 1415 |  | 
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 1416 | 	return do_try_to_free_pages(zonelist, &sc); | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1417 | } | 
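 |  |  | 
 |  | /* | 
 |  |  * Illustrative usage (simplified, not part of this file): the page | 
 |  |  * allocator's slow path enters direct reclaim roughly like this, with | 
 |  |  * PF_MEMALLOC set so that reclaim itself can still allocate if it must: | 
 |  |  * | 
 |  |  *	tsk->flags |= PF_MEMALLOC; | 
 |  |  *	reclaim_state.reclaimed_slab = 0; | 
 |  |  *	tsk->reclaim_state = &reclaim_state; | 
 |  |  *	did_some_progress = try_to_free_pages(zonelist, order, gfp_mask); | 
 |  |  *	tsk->reclaim_state = NULL; | 
 |  |  *	tsk->flags &= ~PF_MEMALLOC; | 
 |  |  */ | 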
 | 1418 |  | 
| Balbir Singh | 00f0b82 | 2008-03-04 14:28:39 -0800 | [diff] [blame] | 1419 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1420 |  | 
| Balbir Singh | e1a1cd5 | 2008-02-07 00:14:02 -0800 | [diff] [blame] | 1421 | unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, | 
 | 1422 | 						gfp_t gfp_mask) | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1423 | { | 
 | 1424 | 	struct scan_control sc = { | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1425 | 		.may_writepage = !laptop_mode, | 
 | 1426 | 		.may_swap = 1, | 
 | 1427 | 		.swap_cluster_max = SWAP_CLUSTER_MAX, | 
 | 1428 | 		.swappiness = vm_swappiness, | 
 | 1429 | 		.order = 0, | 
 | 1430 | 		.mem_cgroup = mem_cont, | 
 | 1431 | 		.isolate_pages = mem_cgroup_isolate_pages, | 
 | 1432 | 	}; | 
| Mel Gorman | dac1d27 | 2008-04-28 02:12:12 -0700 | [diff] [blame] | 1433 | 	struct zonelist *zonelist; | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1434 |  | 
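 |  | 	/* | 
 |  | 	 * Keep the caller's reclaim-behaviour bits (GFP_RECLAIM_MASK) but | 
 |  | 	 * take the zone-selection bits from GFP_HIGHUSER_MOVABLE, so that | 
 |  | 	 * memory-controller reclaim may scan highmem and movable zones. | 
 |  | 	 */ | 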
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 1435 | 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | | 
 | 1436 | 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); | 
 | 1437 | 	zonelist = NODE_DATA(numa_node_id())->node_zonelists; | 
 | 1438 | 	return do_try_to_free_pages(zonelist, &sc); | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1439 | } | 
 | 1440 | #endif | 
 | 1441 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1442 | /* | 
 | 1443 |  * For kswapd, balance_pgdat() will work across all this node's zones until | 
 | 1444 |  * they are all at pages_high. | 
 | 1445 |  * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1446 |  * Returns the number of pages which were actually freed. | 
 | 1447 |  * | 
 | 1448 |  * There is special handling here for zones which are full of pinned pages. | 
 | 1449 |  * This can happen if the pages are all mlocked, or if they are all used by | 
 | 1450 |  * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb. | 
 | 1451 |  * What we do is to detect the case where all pages in the zone have been | 
 | 1452 |  * scanned twice and there has been zero successful reclaim.  Mark the zone as | 
 | 1453 |  * dead and from now on, only perform a short scan.  Basically we're polling | 
 | 1454 |  * the zone for when the problem goes away. | 
 | 1455 |  * | 
 | 1456 |  * kswapd scans the zones in the highmem->normal->dma direction.  It skips | 
 | 1457 |  * zones which have free_pages > pages_high, but once a zone is found to have | 
 | 1458 |  * free_pages <= pages_high, we scan that zone and the lower zones regardless | 
 | 1459 |  * of the number of free pages in the lower zones.  This interoperates with | 
 | 1460 |  * the page allocator fallback scheme to ensure that aging of pages is balanced | 
 | 1461 |  * across the zones. | 
 | 1462 |  */ | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1463 | static unsigned long balance_pgdat(pg_data_t *pgdat, int order) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1464 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1465 | 	int all_zones_ok; | 
 | 1466 | 	int priority; | 
 | 1467 | 	int i; | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1468 | 	unsigned long total_scanned; | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1469 | 	unsigned long nr_reclaimed; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1470 | 	struct reclaim_state *reclaim_state = current->reclaim_state; | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1471 | 	struct scan_control sc = { | 
 | 1472 | 		.gfp_mask = GFP_KERNEL, | 
 | 1473 | 		.may_swap = 1, | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1474 | 		.swap_cluster_max = SWAP_CLUSTER_MAX, | 
 | 1475 | 		.swappiness = vm_swappiness, | 
| Andy Whitcroft | 5ad333e | 2007-07-17 04:03:16 -0700 | [diff] [blame] | 1476 | 		.order = order, | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1477 | 		.mem_cgroup = NULL, | 
 | 1478 | 		.isolate_pages = isolate_pages_global, | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1479 | 	}; | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1480 | 	/* | 
 | 1481 | 	 * temp_priority is used to remember the scanning priority at which | 
 | 1482 | 	 * this zone was successfully refilled to free_pages == pages_high. | 
 | 1483 | 	 */ | 
 | 1484 | 	int temp_priority[MAX_NR_ZONES]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1485 |  | 
 | 1486 | loop_again: | 
 | 1487 | 	total_scanned = 0; | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1488 | 	nr_reclaimed = 0; | 
| Christoph Lameter | c0bbbc7 | 2006-06-11 15:22:26 -0700 | [diff] [blame] | 1489 | 	sc.may_writepage = !laptop_mode; | 
| Christoph Lameter | f8891e5 | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 1490 | 	count_vm_event(PAGEOUTRUN); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1491 |  | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1492 | 	for (i = 0; i < pgdat->nr_zones; i++) | 
 | 1493 | 		temp_priority[i] = DEF_PRIORITY; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1494 |  | 
 | 1495 | 	for (priority = DEF_PRIORITY; priority >= 0; priority--) { | 
 | 1496 | 		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */ | 
 | 1497 | 		unsigned long lru_pages = 0; | 
 | 1498 |  | 
| Rik van Riel | f7b7fd8 | 2005-11-28 13:44:07 -0800 | [diff] [blame] | 1499 | 		/* The swap token gets in the way of swapout... */ | 
 | 1500 | 		if (!priority) | 
 | 1501 | 			disable_swap_token(); | 
 | 1502 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1503 | 		all_zones_ok = 1; | 
 | 1504 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1505 | 		/* | 
 | 1506 | 		 * Scan in the highmem->dma direction for the highest | 
 | 1507 | 		 * zone which needs scanning | 
 | 1508 | 		 */ | 
 | 1509 | 		for (i = pgdat->nr_zones - 1; i >= 0; i--) { | 
 | 1510 | 			struct zone *zone = pgdat->node_zones + i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1511 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1512 | 			if (!populated_zone(zone)) | 
 | 1513 | 				continue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1514 |  | 
| David Rientjes | e815af9 | 2007-10-16 23:25:54 -0700 | [diff] [blame] | 1515 | 			if (zone_is_all_unreclaimable(zone) && | 
 | 1516 | 			    priority != DEF_PRIORITY) | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1517 | 				continue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1518 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1519 | 			if (!zone_watermark_ok(zone, order, zone->pages_high, | 
 | 1520 | 					       0, 0)) { | 
 | 1521 | 				end_zone = i; | 
| Andrew Morton | e1dbeda | 2006-12-06 20:32:01 -0800 | [diff] [blame] | 1522 | 				break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1523 | 			} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1524 | 		} | 
| Andrew Morton | e1dbeda | 2006-12-06 20:32:01 -0800 | [diff] [blame] | 1525 | 		if (i < 0) | 
 | 1526 | 			goto out; | 
 | 1527 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1528 | 		for (i = 0; i <= end_zone; i++) { | 
 | 1529 | 			struct zone *zone = pgdat->node_zones + i; | 
 | 1530 |  | 
| Christoph Lameter | c878538 | 2007-02-10 01:43:01 -0800 | [diff] [blame] | 1531 | 			lru_pages += zone_page_state(zone, NR_ACTIVE) | 
 | 1532 | 					+ zone_page_state(zone, NR_INACTIVE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1533 | 		} | 
 | 1534 |  | 
 | 1535 | 		/* | 
 | 1536 | 		 * Now scan the zone in the dma->highmem direction, stopping | 
 | 1537 | 		 * at the last zone which needs scanning. | 
 | 1538 | 		 * | 
 | 1539 | 		 * We do this because the page allocator works in the opposite | 
 | 1540 | 		 * direction.  This prevents the page allocator from allocating | 
 | 1541 | 		 * pages behind kswapd's direction of progress, which would | 
 | 1542 | 		 * cause too much scanning of the lower zones. | 
 | 1543 | 		 */ | 
 | 1544 | 		for (i = 0; i <= end_zone; i++) { | 
 | 1545 | 			struct zone *zone = pgdat->node_zones + i; | 
| akpm@osdl.org | b15e090 | 2005-06-21 17:14:35 -0700 | [diff] [blame] | 1546 | 			int nr_slab; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1547 |  | 
| Con Kolivas | f3fe651 | 2006-01-06 00:11:15 -0800 | [diff] [blame] | 1548 | 			if (!populated_zone(zone)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1549 | 				continue; | 
 | 1550 |  | 
| David Rientjes | e815af9 | 2007-10-16 23:25:54 -0700 | [diff] [blame] | 1551 | 			if (zone_is_all_unreclaimable(zone) && | 
 | 1552 | 					priority != DEF_PRIORITY) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1553 | 				continue; | 
 | 1554 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1555 | 			if (!zone_watermark_ok(zone, order, zone->pages_high, | 
 | 1556 | 					       end_zone, 0)) | 
 | 1557 | 				all_zones_ok = 0; | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1558 | 			temp_priority[i] = priority; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1559 | 			sc.nr_scanned = 0; | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1560 | 			note_zone_scanning_priority(zone, priority); | 
| Rik van Riel | 32a4330 | 2007-10-16 01:24:50 -0700 | [diff] [blame] | 1561 | 			/* | 
 | 1562 | 			 * We put equal pressure on every zone, unless one | 
 | 1563 | 			 * zone has way too many pages free already. | 
 | 1564 | 			 */ | 
 | 1565 | 			if (!zone_watermark_ok(zone, order, 8*zone->pages_high, | 
 | 1566 | 						end_zone, 0)) | 
 | 1567 | 				nr_reclaimed += shrink_zone(priority, zone, &sc); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1568 | 			reclaim_state->reclaimed_slab = 0; | 
| akpm@osdl.org | b15e090 | 2005-06-21 17:14:35 -0700 | [diff] [blame] | 1569 | 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, | 
 | 1570 | 						lru_pages); | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1571 | 			nr_reclaimed += reclaim_state->reclaimed_slab; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1572 | 			total_scanned += sc.nr_scanned; | 
| David Rientjes | e815af9 | 2007-10-16 23:25:54 -0700 | [diff] [blame] | 1573 | 			if (zone_is_all_unreclaimable(zone)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1574 | 				continue; | 
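 |  | 			/* | 
 |  | 			 * Worked example (illustrative numbers): a zone with | 
 |  | 			 * 10,000 LRU pages is only flagged all-unreclaimable | 
 |  | 			 * below after at least 60,000 pages' worth of scanning | 
 |  | 			 * during which slab reclaim also made no progress. | 
 |  | 			 */ | 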
| akpm@osdl.org | b15e090 | 2005-06-21 17:14:35 -0700 | [diff] [blame] | 1575 | 			if (nr_slab == 0 && zone->pages_scanned >= | 
| Christoph Lameter | c878538 | 2007-02-10 01:43:01 -0800 | [diff] [blame] | 1576 | 				(zone_page_state(zone, NR_ACTIVE) | 
 | 1577 | 				+ zone_page_state(zone, NR_INACTIVE)) * 6) | 
| David Rientjes | e815af9 | 2007-10-16 23:25:54 -0700 | [diff] [blame] | 1578 | 					zone_set_flag(zone, | 
 | 1579 | 						      ZONE_ALL_UNRECLAIMABLE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1580 | 			/* | 
 | 1581 | 			 * If we've done a decent amount of scanning and | 
 | 1582 | 			 * the reclaim ratio is low, start doing writepage | 
 | 1583 | 			 * even in laptop mode | 
 | 1584 | 			 */ | 
 | 1585 | 			if (total_scanned > SWAP_CLUSTER_MAX * 2 && | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1586 | 			    total_scanned > nr_reclaimed + nr_reclaimed / 2) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1587 | 				sc.may_writepage = 1; | 
 | 1588 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1589 | 		if (all_zones_ok) | 
 | 1590 | 			break;		/* kswapd: all done */ | 
 | 1591 | 		/* | 
 | 1592 | 		 * OK, kswapd is getting into trouble.  Take a nap, then take | 
 | 1593 | 		 * another pass across the zones. | 
 | 1594 | 		 */ | 
| Andrew Morton | 4dd4b92 | 2008-03-24 12:29:52 -0700 | [diff] [blame] | 1595 | 		if (total_scanned && priority < DEF_PRIORITY - 2) | 
| Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1596 | 			congestion_wait(WRITE, HZ/10); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1597 |  | 
 | 1598 | 		/* | 
 | 1599 | 		 * We do this so kswapd doesn't build up large priorities for | 
 | 1600 | 		 * example when it is freeing in parallel with allocators. It | 
 | 1601 | 		 * matches the direct reclaim path behaviour in terms of impact | 
 | 1602 | 		 * on zone->*_priority. | 
 | 1603 | 		 */ | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1604 | 		if (nr_reclaimed >= SWAP_CLUSTER_MAX) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1605 | 			break; | 
 | 1606 | 	} | 
 | 1607 | out: | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1608 | 	/* | 
 | 1609 | 	 * Note within each zone the priority level at which this zone was | 
 | 1610 | 	 * brought into a happy state.  So that the next thread which scans this | 
 | 1611 | 	 * zone will start out at that priority level. | 
 | 1612 | 	 */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1613 | 	for (i = 0; i < pgdat->nr_zones; i++) { | 
 | 1614 | 		struct zone *zone = pgdat->node_zones + i; | 
 | 1615 |  | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 1616 | 		zone->prev_priority = temp_priority[i]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1617 | 	} | 
 | 1618 | 	if (!all_zones_ok) { | 
 | 1619 | 		cond_resched(); | 
| Rafael J. Wysocki | 8357376 | 2006-12-06 20:34:18 -0800 | [diff] [blame] | 1620 |  | 
 | 1621 | 		try_to_freeze(); | 
 | 1622 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1623 | 		goto loop_again; | 
 | 1624 | 	} | 
 | 1625 |  | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1626 | 	return nr_reclaimed; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1627 | } | 
 | 1628 |  | 
 | 1629 | /* | 
 | 1630 |  * The background pageout daemon, started as a kernel thread | 
 | 1631 |  * from the init process.  | 
 | 1632 |  * | 
 | 1633 |  * This basically trickles out pages so that we have _some_ | 
 | 1634 |  * free memory available even if there is no other activity | 
 | 1635 |  * that frees anything up. This is needed for things like routing | 
 | 1636 |  * etc, where we otherwise might have all activity going on in | 
 | 1637 |  * asynchronous contexts that cannot page things out. | 
 | 1638 |  * | 
 | 1639 |  * If there are applications that are active memory-allocators | 
 | 1640 |  * (most normal use), this basically shouldn't matter. | 
 | 1641 |  */ | 
 | 1642 | static int kswapd(void *p) | 
 | 1643 | { | 
 | 1644 | 	unsigned long order; | 
 | 1645 | 	pg_data_t *pgdat = (pg_data_t*)p; | 
 | 1646 | 	struct task_struct *tsk = current; | 
 | 1647 | 	DEFINE_WAIT(wait); | 
 | 1648 | 	struct reclaim_state reclaim_state = { | 
 | 1649 | 		.reclaimed_slab = 0, | 
 | 1650 | 	}; | 
| Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 1651 | 	node_to_cpumask_ptr(cpumask, pgdat->node_id); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1652 |  | 
| Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 1653 | 	if (!cpus_empty(*cpumask)) | 
 | 1654 | 		set_cpus_allowed_ptr(tsk, cpumask); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1655 | 	current->reclaim_state = &reclaim_state; | 
 | 1656 |  | 
 | 1657 | 	/* | 
 | 1658 | 	 * Tell the memory management that we're a "memory allocator", | 
 | 1659 | 	 * and that if we need more memory we should get access to it | 
 | 1660 | 	 * regardless (see "__alloc_pages()"). "kswapd" should | 
 | 1661 | 	 * never get caught in the normal page freeing logic. | 
 | 1662 | 	 * | 
 | 1663 | 	 * (Kswapd normally doesn't need memory anyway, but sometimes | 
 | 1664 | 	 * you need a small amount of memory in order to be able to | 
 | 1665 | 	 * page out something else, and this flag essentially protects | 
 | 1666 | 	 * us from recursively trying to free more memory as we're | 
 | 1667 | 	 * trying to free the first piece of memory in the first place). | 
 | 1668 | 	 */ | 
| Christoph Lameter | 930d915 | 2006-01-08 01:00:47 -0800 | [diff] [blame] | 1669 | 	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; | 
| Rafael J. Wysocki | 8314418 | 2007-07-17 04:03:35 -0700 | [diff] [blame] | 1670 | 	set_freezable(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1671 |  | 
 | 1672 | 	order = 0; | 
 | 1673 | 	for ( ; ; ) { | 
 | 1674 | 		unsigned long new_order; | 
| Christoph Lameter | 3e1d1d2 | 2005-06-24 23:13:50 -0700 | [diff] [blame] | 1675 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1676 | 		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); | 
 | 1677 | 		new_order = pgdat->kswapd_max_order; | 
 | 1678 | 		pgdat->kswapd_max_order = 0; | 
 | 1679 | 		if (order < new_order) { | 
 | 1680 | 			/* | 
 | 1681 | 			 * Don't sleep if someone wants a larger 'order' | 
 | 1682 | 			 * allocation | 
 | 1683 | 			 */ | 
 | 1684 | 			order = new_order; | 
 | 1685 | 		} else { | 
| Rafael J. Wysocki | b1296cc | 2007-05-06 14:50:48 -0700 | [diff] [blame] | 1686 | 			if (!freezing(current)) | 
 | 1687 | 				schedule(); | 
 | 1688 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1689 | 			order = pgdat->kswapd_max_order; | 
 | 1690 | 		} | 
 | 1691 | 		finish_wait(&pgdat->kswapd_wait, &wait); | 
 | 1692 |  | 
| Rafael J. Wysocki | b1296cc | 2007-05-06 14:50:48 -0700 | [diff] [blame] | 1693 | 		if (!try_to_freeze()) { | 
 | 1694 | 			/* We can speed up thawing tasks if we don't call | 
 | 1695 | 			 * balance_pgdat after returning from the refrigerator | 
 | 1696 | 			 */ | 
 | 1697 | 			balance_pgdat(pgdat, order); | 
 | 1698 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1699 | 	} | 
 | 1700 | 	return 0; | 
 | 1701 | } | 
 | 1702 |  | 
 | 1703 | /* | 
 | 1704 |  * A zone is low on free memory, so wake its kswapd task to service it. | 
 | 1705 |  */ | 
 | 1706 | void wakeup_kswapd(struct zone *zone, int order) | 
 | 1707 | { | 
 | 1708 | 	pg_data_t *pgdat; | 
 | 1709 |  | 
| Con Kolivas | f3fe651 | 2006-01-06 00:11:15 -0800 | [diff] [blame] | 1710 | 	if (!populated_zone(zone)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1711 | 		return; | 
 | 1712 |  | 
 | 1713 | 	pgdat = zone->zone_pgdat; | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1714 | 	if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1715 | 		return; | 
 | 1716 | 	if (pgdat->kswapd_max_order < order) | 
 | 1717 | 		pgdat->kswapd_max_order = order; | 
| Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 1718 | 	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1719 | 		return; | 
| Con Kolivas | 8d0986e | 2005-09-13 01:25:07 -0700 | [diff] [blame] | 1720 | 	if (!waitqueue_active(&pgdat->kswapd_wait)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1721 | 		return; | 
| Con Kolivas | 8d0986e | 2005-09-13 01:25:07 -0700 | [diff] [blame] | 1722 | 	wake_up_interruptible(&pgdat->kswapd_wait); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1723 | } | 
 | 1724 |  | 
 | 1725 | #ifdef CONFIG_PM | 
 | 1726 | /* | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1727 |  * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages | 
 | 1728 |  * from LRU lists system-wide, for the given pass and priority, and returns | 
 | 1729 |  * the number of reclaimed pages. | 
 | 1730 |  * | 
 | 1731 |  * For pass > 3 we also try to shrink LRU lists that contain only a few pages. | 
 | 1732 |  */ | 
| Nigel Cunningham | e07aa05 | 2006-12-22 01:07:21 -0800 | [diff] [blame] | 1733 | static unsigned long shrink_all_zones(unsigned long nr_pages, int prio, | 
 | 1734 | 				      int pass, struct scan_control *sc) | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1735 | { | 
 | 1736 | 	struct zone *zone; | 
 | 1737 | 	unsigned long nr_to_scan, ret = 0; | 
 | 1738 |  | 
 | 1739 | 	for_each_zone(zone) { | 
 | 1740 |  | 
 | 1741 | 		if (!populated_zone(zone)) | 
 | 1742 | 			continue; | 
 | 1743 |  | 
| David Rientjes | e815af9 | 2007-10-16 23:25:54 -0700 | [diff] [blame] | 1744 | 		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY) | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1745 | 			continue; | 
 | 1746 |  | 
 | 1747 | 		/* For pass = 0 we don't shrink the active list */ | 
 | 1748 | 		if (pass > 0) { | 
| Christoph Lameter | c878538 | 2007-02-10 01:43:01 -0800 | [diff] [blame] | 1749 | 			zone->nr_scan_active += | 
 | 1750 | 				(zone_page_state(zone, NR_ACTIVE) >> prio) + 1; | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1751 | 			if (zone->nr_scan_active >= nr_pages || pass > 3) { | 
 | 1752 | 				zone->nr_scan_active = 0; | 
| Christoph Lameter | c878538 | 2007-02-10 01:43:01 -0800 | [diff] [blame] | 1753 | 				nr_to_scan = min(nr_pages, | 
 | 1754 | 					zone_page_state(zone, NR_ACTIVE)); | 
| Martin Bligh | bbdb396 | 2006-10-28 10:38:25 -0700 | [diff] [blame] | 1755 | 				shrink_active_list(nr_to_scan, zone, sc, prio); | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1756 | 			} | 
 | 1757 | 		} | 
 | 1758 |  | 
| Christoph Lameter | c878538 | 2007-02-10 01:43:01 -0800 | [diff] [blame] | 1759 | 		zone->nr_scan_inactive += | 
 | 1760 | 			(zone_page_state(zone, NR_INACTIVE) >> prio) + 1; | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1761 | 		if (zone->nr_scan_inactive >= nr_pages || pass > 3) { | 
 | 1762 | 			zone->nr_scan_inactive = 0; | 
| Christoph Lameter | c878538 | 2007-02-10 01:43:01 -0800 | [diff] [blame] | 1763 | 			nr_to_scan = min(nr_pages, | 
 | 1764 | 				zone_page_state(zone, NR_INACTIVE)); | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1765 | 			ret += shrink_inactive_list(nr_to_scan, zone, sc); | 
 | 1766 | 			if (ret >= nr_pages) | 
 | 1767 | 				return ret; | 
 | 1768 | 		} | 
 | 1769 | 	} | 
 | 1770 |  | 
 | 1771 | 	return ret; | 
 | 1772 | } | 
 | 1773 |  | 
| Andrew Morton | 76395d3 | 2007-01-05 16:37:05 -0800 | [diff] [blame] | 1774 | static unsigned long count_lru_pages(void) | 
 | 1775 | { | 
| Christoph Lameter | c878538 | 2007-02-10 01:43:01 -0800 | [diff] [blame] | 1776 | 	return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE); | 
| Andrew Morton | 76395d3 | 2007-01-05 16:37:05 -0800 | [diff] [blame] | 1777 | } | 
 | 1778 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1779 | /* | 
 | 1780 |  * Try to free `nr_pages' of memory, system-wide, and return the number of | 
 | 1781 |  * freed pages. | 
 | 1782 |  * | 
 | 1783 |  * Rather than trying to age LRUs the aim is to preserve the overall | 
 | 1784 |  * LRU order by reclaiming preferentially | 
 | 1785 |  * inactive > active > active referenced > active mapped | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1786 |  */ | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1787 | unsigned long shrink_all_memory(unsigned long nr_pages) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1788 | { | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1789 | 	unsigned long lru_pages, nr_slab; | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1790 | 	unsigned long ret = 0; | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1791 | 	int pass; | 
 | 1792 | 	struct reclaim_state reclaim_state; | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1793 | 	struct scan_control sc = { | 
 | 1794 | 		.gfp_mask = GFP_KERNEL, | 
 | 1795 | 		.may_swap = 0, | 
 | 1796 | 		.swap_cluster_max = nr_pages, | 
 | 1797 | 		.may_writepage = 1, | 
 | 1798 | 		.swappiness = vm_swappiness, | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1799 | 		.isolate_pages = isolate_pages_global, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1800 | 	}; | 
 | 1801 |  | 
 | 1802 | 	current->reclaim_state = &reclaim_state; | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1803 |  | 
| Andrew Morton | 76395d3 | 2007-01-05 16:37:05 -0800 | [diff] [blame] | 1804 | 	lru_pages = count_lru_pages(); | 
| Christoph Lameter | 972d1a7 | 2006-09-25 23:31:51 -0700 | [diff] [blame] | 1805 | 	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE); | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1806 | 	/* If slab caches are huge, it's better to hit them first */ | 
 | 1807 | 	while (nr_slab >= lru_pages) { | 
 | 1808 | 		reclaim_state.reclaimed_slab = 0; | 
 | 1809 | 		shrink_slab(nr_pages, sc.gfp_mask, lru_pages); | 
 | 1810 | 		if (!reclaim_state.reclaimed_slab) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1811 | 			break; | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1812 |  | 
 | 1813 | 		ret += reclaim_state.reclaimed_slab; | 
 | 1814 | 		if (ret >= nr_pages) | 
 | 1815 | 			goto out; | 
 | 1816 |  | 
 | 1817 | 		nr_slab -= reclaim_state.reclaimed_slab; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1818 | 	} | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1819 |  | 
 | 1820 | 	/* | 
 | 1821 | 	 * We try to shrink LRUs in 5 passes: | 
 | 1822 | 	 * 0 = Reclaim from inactive_list only | 
 | 1823 | 	 * 1 = Reclaim from active list but don't reclaim mapped | 
 | 1824 | 	 * 2 = 2nd pass of type 1 | 
 | 1825 | 	 * 3 = Reclaim mapped (normal reclaim) | 
 | 1826 | 	 * 4 = 2nd pass of type 3 | 
 | 1827 | 	 */ | 
 | 1828 | 	for (pass = 0; pass < 5; pass++) { | 
 | 1829 | 		int prio; | 
 | 1830 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1831 | 		/* Force reclaiming mapped pages in the passes #3 and #4 */ | 
 | 1832 | 		if (pass > 2) { | 
 | 1833 | 			sc.may_swap = 1; | 
 | 1834 | 			sc.swappiness = 100; | 
 | 1835 | 		} | 
 | 1836 |  | 
 | 1837 | 		for (prio = DEF_PRIORITY; prio >= 0; prio--) { | 
 | 1838 | 			unsigned long nr_to_scan = nr_pages - ret; | 
 | 1839 |  | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1840 | 			sc.nr_scanned = 0; | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1841 | 			ret += shrink_all_zones(nr_to_scan, prio, pass, &sc); | 
 | 1842 | 			if (ret >= nr_pages) | 
 | 1843 | 				goto out; | 
 | 1844 |  | 
 | 1845 | 			reclaim_state.reclaimed_slab = 0; | 
| Andrew Morton | 76395d3 | 2007-01-05 16:37:05 -0800 | [diff] [blame] | 1846 | 			shrink_slab(sc.nr_scanned, sc.gfp_mask, | 
 | 1847 | 					count_lru_pages()); | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1848 | 			ret += reclaim_state.reclaimed_slab; | 
 | 1849 | 			if (ret >= nr_pages) | 
 | 1850 | 				goto out; | 
 | 1851 |  | 
 | 1852 | 			if (sc.nr_scanned && prio < DEF_PRIORITY - 2) | 
| Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1853 | 				congestion_wait(WRITE, HZ / 10); | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1854 | 		} | 
| Rafael J. Wysocki | 248a030 | 2006-03-22 00:09:04 -0800 | [diff] [blame] | 1855 | 	} | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1856 |  | 
 | 1857 | 	/* | 
 | 1858 | 	 * If ret = 0, we could not shrink LRUs, but there may be something | 
 | 1859 | 	 * in slab caches | 
 | 1860 | 	 */ | 
| Andrew Morton | 76395d3 | 2007-01-05 16:37:05 -0800 | [diff] [blame] | 1861 | 	if (!ret) { | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1862 | 		do { | 
 | 1863 | 			reclaim_state.reclaimed_slab = 0; | 
| Andrew Morton | 76395d3 | 2007-01-05 16:37:05 -0800 | [diff] [blame] | 1864 | 			shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages()); | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1865 | 			ret += reclaim_state.reclaimed_slab; | 
 | 1866 | 		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0); | 
| Andrew Morton | 76395d3 | 2007-01-05 16:37:05 -0800 | [diff] [blame] | 1867 | 	} | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1868 |  | 
 | 1869 | out: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1870 | 	current->reclaim_state = NULL; | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1871 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1872 | 	return ret; | 
 | 1873 | } | 
 | 1874 | #endif | 
 | 1875 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1876 | /* It's optimal to keep kswapds on the same CPUs as their memory, but | 
 | 1877 |    not required for correctness.  So if the last cpu in a node goes | 
 | 1878 |    away, we get changed to run anywhere: as the first one comes back, | 
 | 1879 |    restore their cpu bindings. */ | 
| Chandra Seetharaman | 9c7b216 | 2006-06-27 02:54:07 -0700 | [diff] [blame] | 1880 | static int __devinit cpu_callback(struct notifier_block *nfb, | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1881 | 				  unsigned long action, void *hcpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1882 | { | 
| Yasunori Goto | 58c0a4a | 2007-10-16 01:25:40 -0700 | [diff] [blame] | 1883 | 	int nid; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1884 |  | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 1885 | 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { | 
| Yasunori Goto | 58c0a4a | 2007-10-16 01:25:40 -0700 | [diff] [blame] | 1886 | 		for_each_node_state(nid, N_HIGH_MEMORY) { | 
| Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 1887 | 			pg_data_t *pgdat = NODE_DATA(nid); | 
 | 1888 | 			node_to_cpumask_ptr(mask, pgdat->node_id); | 
 | 1889 |  | 
 | 1890 | 			if (any_online_cpu(*mask) < nr_cpu_ids) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1891 | 				/* One of our CPUs online: restore mask */ | 
| Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 1892 | 				set_cpus_allowed_ptr(pgdat->kswapd, mask); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1893 | 		} | 
 | 1894 | 	} | 
 | 1895 | 	return NOTIFY_OK; | 
 | 1896 | } | 
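
cpu_callback() above re-binds each node's kswapd to the node's CPU mask once one of those CPUs comes back online. As a hedged userspace analogy (not kernel code) of the same "restore the binding" idea, using sched_setaffinity() with hypothetical CPU numbers:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t mask;

	/* Hypothetical node whose memory sits next to CPUs 0 and 1:
	 * re-establish the binding the way cpu_callback() does with
	 * set_cpus_allowed_ptr(), but for the current process. */
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	CPU_SET(1, &mask);

	if (sched_setaffinity(0, sizeof(mask), &mask) != 0)
		perror("sched_setaffinity");
	else
		printf("pid %d bound to CPUs 0-1\n", getpid());
	return 0;
}
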
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1897 |  | 
| Yasunori Goto | 3218ae1 | 2006-06-27 02:53:33 -0700 | [diff] [blame] | 1898 | /* | 
 | 1899 |  * This kswapd start function will be called by init and by node hot-add. | 
 | 1900 |  * On node hot-add, kswapd will be moved to the proper CPUs once those CPUs are hot-added. | 
 | 1901 |  */ | 
 | 1902 | int kswapd_run(int nid) | 
 | 1903 | { | 
 | 1904 | 	pg_data_t *pgdat = NODE_DATA(nid); | 
 | 1905 | 	int ret = 0; | 
 | 1906 |  | 
 | 1907 | 	if (pgdat->kswapd) | 
 | 1908 | 		return 0; | 
 | 1909 |  | 
 | 1910 | 	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); | 
 | 1911 | 	if (IS_ERR(pgdat->kswapd)) { | 
 | 1912 | 		/* failure at boot is fatal */ | 
 | 1913 | 		BUG_ON(system_state == SYSTEM_BOOTING); | 
 | 1914 | 		printk(KERN_ERR "Failed to start kswapd on node %d\n", nid); | 
 | 1915 | 		ret = -1; | 
 | 1916 | 	} | 
 | 1917 | 	return ret; | 
 | 1918 | } | 
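
kthread_run() signals failure by encoding an errno value in the returned pointer itself, which is why kswapd_run() tests IS_ERR(pgdat->kswapd) rather than comparing against NULL. A simplified, self-contained re-creation of that ERR_PTR()/IS_ERR() convention (the real macros live in include/linux/err.h; fake_kthread_run() is a hypothetical helper for illustration):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

/* Simplified versions of the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fake_kthread_run(int fail)
{
	static int dummy_task;

	return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy_task;
}

int main(void)
{
	void *task = fake_kthread_run(1);

	if (IS_ERR(task))
		printf("kthread failed: errno %ld\n", -PTR_ERR(task));
	else
		printf("kthread started\n");
	return 0;
}
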
 | 1919 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1920 | static int __init kswapd_init(void) | 
 | 1921 | { | 
| Yasunori Goto | 3218ae1 | 2006-06-27 02:53:33 -0700 | [diff] [blame] | 1922 | 	int nid; | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1923 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1924 | 	swap_setup(); | 
| Christoph Lameter | 9422ffb | 2007-10-16 01:25:31 -0700 | [diff] [blame] | 1925 | 	for_each_node_state(nid, N_HIGH_MEMORY) | 
| Yasunori Goto | 3218ae1 | 2006-06-27 02:53:33 -0700 | [diff] [blame] | 1926 |  		kswapd_run(nid); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1927 | 	hotcpu_notifier(cpu_callback, 0); | 
 | 1928 | 	return 0; | 
 | 1929 | } | 
 | 1930 |  | 
 | 1931 | module_init(kswapd_init) | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1932 |  | 
 | 1933 | #ifdef CONFIG_NUMA | 
 | 1934 | /* | 
 | 1935 |  * Zone reclaim mode | 
 | 1936 |  * | 
 | 1937 |  * If non-zero, call zone_reclaim() when the number of free pages falls below | 
 | 1938 |  * the watermarks. | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1939 |  */ | 
 | 1940 | int zone_reclaim_mode __read_mostly; | 
 | 1941 |  | 
| Christoph Lameter | 1b2ffb7 | 2006-02-01 03:05:34 -0800 | [diff] [blame] | 1942 | #define RECLAIM_OFF 0 | 
 | 1943 | #define RECLAIM_ZONE (1<<0)	/* Run shrink_cache on the zone */ | 
 | 1944 | #define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */ | 
 | 1945 | #define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */ | 
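
The three RECLAIM_* bits above are OR-ed together to form the value written to /proc/sys/vm/zone_reclaim_mode. A small decoding sketch; the mode value 7 is just an example:

#include <stdio.h>

#define RECLAIM_ZONE  (1 << 0)	/* run reclaim on the zone's LRU lists */
#define RECLAIM_WRITE (1 << 1)	/* write out pages during reclaim */
#define RECLAIM_SWAP  (1 << 2)	/* swap pages out during reclaim */

int main(void)
{
	/* e.g. "echo 7 > /proc/sys/vm/zone_reclaim_mode" enables all three */
	int mode = 7;

	printf("zone reclaim: %s\n", mode ? "on" : "off (RECLAIM_OFF)");
	printf("shrink zone:  %s\n", (mode & RECLAIM_ZONE)  ? "yes" : "no");
	printf("writeback:    %s\n", (mode & RECLAIM_WRITE) ? "yes" : "no");
	printf("swap:         %s\n", (mode & RECLAIM_SWAP)  ? "yes" : "no");
	return 0;
}
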
 | 1946 |  | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1947 | /* | 
| Christoph Lameter | a92f712 | 2006-02-01 03:05:32 -0800 | [diff] [blame] | 1948 |  * Priority for ZONE_RECLAIM. This determines the fraction of pages | 
 | 1949 |  * of a node considered for each zone_reclaim run. A priority of 4 scans | 
 | 1950 |  * 1/16th of a zone. | 
 | 1951 |  */ | 
 | 1952 | #define ZONE_RECLAIM_PRIORITY 4 | 
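
A reclaim pass at priority p scans roughly zone_size >> p pages, so priority 4 corresponds to 1/16th of the zone, as the comment above says. A short worked example with a hypothetical 1 GiB zone of 4 KiB pages (the exact scan formula lives in shrink_zone(); the shift here is an approximation):

#include <stdio.h>

#define ZONE_RECLAIM_PRIORITY 4

int main(void)
{
	/* Hypothetical zone: 1 GiB of 4 KiB pages = 262144 pages. */
	unsigned long zone_pages = (1UL << 30) / 4096;

	/* Approximation: a pass at priority p looks at zone_pages >> p. */
	printf("pages scanned at priority %d: %lu (1/%lu of the zone)\n",
	       ZONE_RECLAIM_PRIORITY,
	       zone_pages >> ZONE_RECLAIM_PRIORITY,
	       1UL << ZONE_RECLAIM_PRIORITY);
	return 0;
}
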
 | 1953 |  | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1954 | /* | 
| Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 1955 |  * Percentage of pages in a zone that must be unmapped for zone_reclaim to | 
 | 1956 |  * occur. | 
 | 1957 |  */ | 
 | 1958 | int sysctl_min_unmapped_ratio = 1; | 
 | 1959 |  | 
 | 1960 | /* | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 1961 |  * If the number of slab pages in a zone grows beyond this percentage then | 
 | 1962 |  * slab reclaim needs to occur. | 
 | 1963 |  */ | 
 | 1964 | int sysctl_min_slab_ratio = 5; | 
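
Both sysctls are percentages of a zone's pages; the kernel turns them into the per-zone thresholds zone->min_unmapped_pages and zone->min_slab_pages used below. A sketch of that conversion, assuming the straightforward pages * ratio / 100 formula (the authoritative computation lives in the sysctl handlers, not here):

#include <stdio.h>

int main(void)
{
	/* Hypothetical zone of 262144 pages (1 GiB with 4 KiB pages). */
	unsigned long present_pages = 262144;
	int min_unmapped_ratio = 1;	/* sysctl_min_unmapped_ratio default */
	int min_slab_ratio = 5;		/* sysctl_min_slab_ratio default */

	/* Assumed conversion from percentage to per-zone page thresholds. */
	unsigned long min_unmapped_pages = present_pages * min_unmapped_ratio / 100;
	unsigned long min_slab_pages     = present_pages * min_slab_ratio / 100;

	printf("min_unmapped_pages = %lu\n", min_unmapped_pages);	/* 2621 */
	printf("min_slab_pages     = %lu\n", min_slab_pages);		/* 13107 */
	return 0;
}
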
 | 1965 |  | 
 | 1966 | /* | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1967 |  * Try to free up some pages from this zone through reclaim. | 
 | 1968 |  */ | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1969 | static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1970 | { | 
| Christoph Lameter | 7fb2d46 | 2006-03-22 00:08:22 -0800 | [diff] [blame] | 1971 | 	/* Minimum pages needed in order to stay on node */ | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1972 | 	const unsigned long nr_pages = 1 << order; | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1973 | 	struct task_struct *p = current; | 
 | 1974 | 	struct reclaim_state reclaim_state; | 
| Christoph Lameter | 8695949 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1975 | 	int priority; | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 1976 | 	unsigned long nr_reclaimed = 0; | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1977 | 	struct scan_control sc = { | 
 | 1978 | 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), | 
 | 1979 | 		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP), | 
| Andrew Morton | 69e0594 | 2006-03-22 00:08:19 -0800 | [diff] [blame] | 1980 | 		.swap_cluster_max = max_t(unsigned long, nr_pages, | 
 | 1981 | 					SWAP_CLUSTER_MAX), | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1982 | 		.gfp_mask = gfp_mask, | 
| Rafael J. Wysocki | d6277db | 2006-06-23 02:03:18 -0700 | [diff] [blame] | 1983 | 		.swappiness = vm_swappiness, | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1984 | 		.isolate_pages = isolate_pages_global, | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 1985 | 	}; | 
| Christoph Lameter | 83e33a4 | 2006-09-25 23:31:53 -0700 | [diff] [blame] | 1986 | 	unsigned long slab_reclaimable; | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1987 |  | 
 | 1988 | 	disable_swap_token(); | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1989 | 	cond_resched(); | 
| Christoph Lameter | d4f7796 | 2006-02-24 13:04:22 -0800 | [diff] [blame] | 1990 | 	/* | 
 | 1991 | 	 * We need to be able to allocate from the reserves for RECLAIM_SWAP | 
 | 1992 | 	 * and we also need to be able to write out pages for RECLAIM_WRITE | 
 | 1993 | 	 * and RECLAIM_SWAP. | 
 | 1994 | 	 */ | 
 | 1995 | 	p->flags |= PF_MEMALLOC | PF_SWAPWRITE; | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1996 | 	reclaim_state.reclaimed_slab = 0; | 
 | 1997 | 	p->reclaim_state = &reclaim_state; | 
| Christoph Lameter | c84db23 | 2006-02-01 03:05:29 -0800 | [diff] [blame] | 1998 |  | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 1999 | 	if (zone_page_state(zone, NR_FILE_PAGES) - | 
 | 2000 | 		zone_page_state(zone, NR_FILE_MAPPED) > | 
 | 2001 | 		zone->min_unmapped_pages) { | 
 | 2002 | 		/* | 
 | 2003 | 		 * Free memory by calling shrink_zone() with increasingly | 
 | 2004 | 		 * aggressive (numerically lower) priorities until enough is freed. | 
 | 2005 | 		 */ | 
 | 2006 | 		priority = ZONE_RECLAIM_PRIORITY; | 
 | 2007 | 		do { | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 2008 | 			note_zone_scanning_priority(zone, priority); | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 2009 | 			nr_reclaimed += shrink_zone(priority, zone, &sc); | 
 | 2010 | 			priority--; | 
 | 2011 | 		} while (priority >= 0 && nr_reclaimed < nr_pages); | 
 | 2012 | 	} | 
| Christoph Lameter | c84db23 | 2006-02-01 03:05:29 -0800 | [diff] [blame] | 2013 |  | 
| Christoph Lameter | 83e33a4 | 2006-09-25 23:31:53 -0700 | [diff] [blame] | 2014 | 	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE); | 
 | 2015 | 	if (slab_reclaimable > zone->min_slab_pages) { | 
| Christoph Lameter | 2a16e3f | 2006-02-01 03:05:35 -0800 | [diff] [blame] | 2016 | 		/* | 
| Christoph Lameter | 7fb2d46 | 2006-03-22 00:08:22 -0800 | [diff] [blame] | 2017 | 		 * shrink_slab() does not currently allow us to determine how | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 2018 | 		 * many pages were freed in this zone. So we take the current | 
 | 2019 | 		 * number of slab pages and shake the slab until it is reduced | 
 | 2020 | 		 * by the same nr_pages that we used for reclaiming unmapped | 
 | 2021 | 		 * pages. | 
| Christoph Lameter | 2a16e3f | 2006-02-01 03:05:35 -0800 | [diff] [blame] | 2022 | 		 * | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 2023 | 		 * Note that shrink_slab will free memory on all zones and may | 
 | 2024 | 		 * take a long time. | 
| Christoph Lameter | 2a16e3f | 2006-02-01 03:05:35 -0800 | [diff] [blame] | 2025 | 		 */ | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 2026 | 		while (shrink_slab(sc.nr_scanned, gfp_mask, order) && | 
| Christoph Lameter | 83e33a4 | 2006-09-25 23:31:53 -0700 | [diff] [blame] | 2027 | 			zone_page_state(zone, NR_SLAB_RECLAIMABLE) > | 
 | 2028 | 				slab_reclaimable - nr_pages) | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 2029 | 			; | 
| Christoph Lameter | 83e33a4 | 2006-09-25 23:31:53 -0700 | [diff] [blame] | 2030 |  | 
 | 2031 | 		/* | 
 | 2032 | 		 * Update nr_reclaimed by the number of slab pages we | 
 | 2033 | 		 * reclaimed from this zone. | 
 | 2034 | 		 */ | 
 | 2035 | 		nr_reclaimed += slab_reclaimable - | 
 | 2036 | 			zone_page_state(zone, NR_SLAB_RECLAIMABLE); | 
| Christoph Lameter | 2a16e3f | 2006-02-01 03:05:35 -0800 | [diff] [blame] | 2037 | 	} | 
 | 2038 |  | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 2039 | 	p->reclaim_state = NULL; | 
| Christoph Lameter | d4f7796 | 2006-02-24 13:04:22 -0800 | [diff] [blame] | 2040 | 	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); | 
| Andrew Morton | 05ff513 | 2006-03-22 00:08:20 -0800 | [diff] [blame] | 2041 | 	return nr_reclaimed >= nr_pages; | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 2042 | } | 
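
__zone_reclaim() above retries shrink_zone() at ever lower priority values, i.e. scanning ever larger fractions of the zone, until the allocation's worth of pages has been reclaimed. A toy simulation of that retry loop; fake_shrink_zone() and its 2% reclaim rate are invented purely for illustration:

#include <stdio.h>

#define ZONE_RECLAIM_PRIORITY 4

/* Stand-in for shrink_zone(): pretend a pass frees 2% of what it scans. */
static unsigned long fake_shrink_zone(int priority, unsigned long zone_pages)
{
	unsigned long scanned = zone_pages >> priority;

	return scanned / 50;
}

int main(void)
{
	unsigned long zone_pages = 262144, nr_reclaimed = 0;
	const unsigned long nr_pages = 1 << 4;	/* an order-4 allocation */
	int priority = ZONE_RECLAIM_PRIORITY;

	/* Same shape as the priority loop in __zone_reclaim(): retry with
	 * ever lower priority (scanning more of the zone) until enough
	 * pages are freed or priority 0 has been tried. */
	do {
		nr_reclaimed += fake_shrink_zone(priority, zone_pages);
		priority--;
	} while (priority >= 0 && nr_reclaimed < nr_pages);

	printf("reclaimed %lu pages (target %lu), stopped at priority %d\n",
	       nr_reclaimed, nr_pages, priority + 1);
	return 0;
}
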
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 2043 |  | 
 | 2044 | int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) | 
 | 2045 | { | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 2046 | 	int node_id; | 
| David Rientjes | d773ed6 | 2007-10-16 23:26:01 -0700 | [diff] [blame] | 2047 | 	int ret; | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 2048 |  | 
 | 2049 | 	/* | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 2050 | 	 * Zone reclaim reclaims unmapped file-backed pages and | 
 | 2051 | 	 * slab pages if we are over the defined limits. | 
| Christoph Lameter | 34aa133 | 2006-06-30 01:55:37 -0700 | [diff] [blame] | 2052 | 	 * | 
| Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 2053 | 	 * A small portion of unmapped file-backed pages is needed for | 
 | 2054 | 	 * file I/O; otherwise pages read by file I/O will be immediately | 
 | 2055 | 	 * thrown out if the zone is overallocated. So we do not reclaim | 
 | 2056 | 	 * if less than a specified percentage of the zone is used by | 
 | 2057 | 	 * unmapped file-backed pages. | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 2058 | 	 */ | 
| Christoph Lameter | 34aa133 | 2006-06-30 01:55:37 -0700 | [diff] [blame] | 2059 | 	if (zone_page_state(zone, NR_FILE_PAGES) - | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 2060 | 	    zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages | 
 | 2061 | 	    && zone_page_state(zone, NR_SLAB_RECLAIMABLE) | 
 | 2062 | 			<= zone->min_slab_pages) | 
| Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 2063 | 		return 0; | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 2064 |  | 
| David Rientjes | d773ed6 | 2007-10-16 23:26:01 -0700 | [diff] [blame] | 2065 | 	if (zone_is_all_unreclaimable(zone)) | 
 | 2066 | 		return 0; | 
 | 2067 |  | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 2068 | 	/* | 
| David Rientjes | d773ed6 | 2007-10-16 23:26:01 -0700 | [diff] [blame] | 2069 | 	 * Do not scan if the allocation should not be delayed. | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 2070 | 	 */ | 
| David Rientjes | d773ed6 | 2007-10-16 23:26:01 -0700 | [diff] [blame] | 2071 | 	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC)) | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 2072 | 			return 0; | 
 | 2073 |  | 
 | 2074 | 	/* | 
 | 2075 | 	 * Only run zone reclaim on the local zone or on zones that do not | 
 | 2076 | 	 * have associated processors. This will favor the local processor | 
 | 2077 | 	 * over remote processors and spread off-node memory allocations | 
 | 2078 | 	 * as widely as possible. | 
 | 2079 | 	 */ | 
| Christoph Lameter | 89fa302 | 2006-09-25 23:31:55 -0700 | [diff] [blame] | 2080 | 	node_id = zone_to_nid(zone); | 
| Christoph Lameter | 37c0708 | 2007-10-16 01:25:36 -0700 | [diff] [blame] | 2081 | 	if (node_state(node_id, N_CPU) && node_id != numa_node_id()) | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 2082 | 		return 0; | 
| David Rientjes | d773ed6 | 2007-10-16 23:26:01 -0700 | [diff] [blame] | 2083 |  | 
 | 2084 | 	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) | 
 | 2085 | 		return 0; | 
 | 2086 | 	ret = __zone_reclaim(zone, gfp_mask, order); | 
 | 2087 | 	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); | 
 | 2088 |  | 
 | 2089 | 	return ret; | 
| Andrew Morton | 179e963 | 2006-03-22 00:08:18 -0800 | [diff] [blame] | 2090 | } | 
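
zone_reclaim() above only proceeds when the zone is over at least one of the two limits (unmapped file-backed pages or reclaimable slab pages). A condensed sketch of that first gate with hypothetical counter values; struct zone_stats and worth_reclaiming() are illustrative names, not kernel API:

#include <stdio.h>

/* Hypothetical per-zone counters, in pages. */
struct zone_stats {
	unsigned long file_pages;
	unsigned long file_mapped;
	unsigned long slab_reclaimable;
	unsigned long min_unmapped_pages;
	unsigned long min_slab_pages;
};

/* Mirrors the first check in zone_reclaim(): only bother if there are
 * enough unmapped file-backed pages or enough reclaimable slab pages. */
static int worth_reclaiming(const struct zone_stats *z)
{
	unsigned long unmapped = z->file_pages - z->file_mapped;

	return unmapped > z->min_unmapped_pages ||
	       z->slab_reclaimable > z->min_slab_pages;
}

int main(void)
{
	struct zone_stats z = {
		.file_pages = 50000,
		.file_mapped = 48000,
		.slab_reclaimable = 20000,
		.min_unmapped_pages = 2621,
		.min_slab_pages = 13107,
	};

	/* unmapped = 2000 <= 2621, but slab 20000 > 13107, so reclaim runs */
	printf("zone_reclaim would %s\n",
	       worth_reclaiming(&z) ? "run" : "return 0 immediately");
	return 0;
}
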
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 2091 | #endif |