/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static void put_compound_page(struct page *page)
{
	page = (struct page *)page_private(page);
	if (put_page_testzero(page)) {
		void (*dtor)(struct page *page);

		dtor = (void (*)(struct page *))page[1].lru.next;
		(*dtor)(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__page_cache_release(page);
}
EXPORT_SYMBOL(put_page);
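
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): a typical caller pairs get_page() with put_page().  For a tail
 * page of a compound page, page_private() points back at the head page,
 * which is why put_compound_page() redirects to it above.  The function
 * name below is hypothetical.
 */
#if 0
static void example_inspect_page(struct page *page)
{
	get_page(page);		/* pin the page across the operation */
	/* ... read or copy from the page ... */
	put_page(page);		/* may free it if this was the last ref */
}
#endif	/* sketch */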

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.  The page still has PageWriteback set, which will pin it.
 *
 * We don't expect many pages to come through here, so don't bother batching
 * things up.
 *
 * To avoid placing the page at the tail of the LRU while PG_writeback is still
 * set, this function will clear PG_writeback before performing the page
 * motion.  Do that inside the lru lock because once PG_writeback is cleared
 * we may not touch the page.
 *
 * Returns zero if it cleared PG_writeback.
 */
int rotate_reclaimable_page(struct page *page)
{
	struct zone *zone;
	unsigned long flags;

	if (PageLocked(page))
		return 1;
	if (PageDirty(page))
		return 1;
	if (PageActive(page))
		return 1;
	if (!PageLRU(page))
		return 1;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lru_lock, flags);
	if (PageLRU(page) && !PageActive(page)) {
		list_del(&page->lru);
		list_add_tail(&page->lru, &zone->inactive_list);
		inc_page_state(pgrotated);
	}
	if (!test_clear_page_writeback(page))
		BUG();
	spin_unlock_irqrestore(&zone->lru_lock, flags);
	return 0;
}
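
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): writeback completion is the expected caller.  This mirrors the
 * shape of end_page_writeback() in mm/filemap.c, but is only a sketch;
 * the real code also wakes PG_writeback waiters afterwards.
 */
#if 0
static void example_end_writeback(struct page *page)
{
	/*
	 * If the page was marked PG_reclaim, try to rotate it; on
	 * success (return value 0) rotate_reclaimable_page() has
	 * already cleared PG_writeback for us.
	 */
	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
		if (!test_clear_page_writeback(page))
			BUG();
	}
}
#endif	/* sketch */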

/*
 * FIXME: speed this up?
 */
void fastcall activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page)) {
		del_page_from_inactive_list(zone, page);
		SetPageActive(page);
		add_page_to_active_list(zone, page);
		inc_page_state(pgactivate);
	}
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void fastcall mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}

EXPORT_SYMBOL(mark_page_accessed);
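
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): two touches walk a cold page through the state machine above;
 * the first sets PG_referenced, the second promotes the page to the
 * active list.  The function name is hypothetical.
 */
#if 0
static void example_touch_twice(struct page *page)
{
	mark_page_accessed(page);	/* inactive,unreferenced -> inactive,referenced */
	mark_page_accessed(page);	/* inactive,referenced -> active,unreferenced */
}
#endif	/* sketch */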

static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };

/**
 * lru_cache_add: add a page to the inactive LRU list, via a per-cpu pagevec
 * @page: the page to add
 */
void fastcall lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec);
	put_cpu_var(lru_add_pvecs);
}

void fastcall lru_cache_add_active(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add_active(pvec);
	put_cpu_var(lru_add_active_pvecs);
}
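
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): a fault path can allocate a fresh anonymous page and feed it
 * to the LRU in the active state, much as do_anonymous_page() does.
 * The name is hypothetical and error handling is omitted.
 */
#if 0
static struct page *example_new_anon_page(void)
{
	struct page *page = alloc_page(GFP_HIGHUSER);

	if (page)
		lru_cache_add_active(page);	/* batched via the per-cpu pagevec */
	return page;
}
#endif	/* sketch */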

static void __lru_add_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);

	/*
	 * No locking needed: either the CPU is dead, or it is the local
	 * CPU with preemption disabled by the caller.
	 */
	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);
	pvec = &per_cpu(lru_add_active_pvecs, cpu);
	if (pagevec_count(pvec))
		__pagevec_lru_add_active(pvec);
}

void lru_add_drain(void)
{
	__lru_add_drain(get_cpu());
	put_cpu();
}

#ifdef CONFIG_NUMA
static void lru_add_drain_per_cpu(void *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
}

#else

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	lru_add_drain();
	return 0;
}
#endif
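
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): code that needs recently-added pages to actually be visible on
 * the LRU lists (page migration, for instance) drains the per-cpu
 * pagevecs first.  The function name is hypothetical.
 */
#if 0
static int example_prepare_for_isolation(void)
{
	/* Flush every CPU's pending lru_add pagevecs onto the LRU lists. */
	return lru_add_drain_all();
}
#endif	/* sketch */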

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
void fastcall __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	free_hot_page(page);
}
EXPORT_SYMBOL(__page_cache_release);

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_cache(): we recheck the
 * page count inside the lock to see whether shrink_cache grabbed the page
 * via the LRU.  If it did, give up: shrink_cache will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irq(&zone->lru_lock);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);
			if (pagezone != zone) {
				if (zone)
					spin_unlock_irq(&zone->lru_lock);
				zone = pagezone;
				spin_lock_irq(&zone->lru_lock);
			}
			BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irq(&zone->lru_lock);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);

	pagevec_free(&pages_to_free);
}
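
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): a caller holding references on a batch of pages can drop them
 * all with one release_pages() call instead of looping over put_page().
 * Names are hypothetical.
 */
#if 0
static void example_unpin_batch(struct page **pages, int nr)
{
	release_pages(pages, nr, 0);	/* cold == 0: treat pages as cache-hot */
}
#endif	/* sketch */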

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_release);

/*
 * pagevec_release() for pages which are known to not be on the LRU
 *
 * This function reinitialises the caller's pagevec.
 */
void __pagevec_release_nonlru(struct pagevec *pvec)
{
	int i;
	struct pagevec pages_to_free;

	pagevec_init(&pages_to_free, pvec->cold);
	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		BUG_ON(PageLRU(page));
		if (put_page_testzero(page))
			pagevec_add(&pages_to_free, page);
	}
	pagevec_free(&pages_to_free);
	pagevec_reinit(pvec);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	int i;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		BUG_ON(PageLRU(page));
		SetPageLRU(page);
		add_page_to_inactive_list(zone, page);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_lru_add);

void __pagevec_lru_add_active(struct pagevec *pvec)
{
	int i;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		BUG_ON(PageLRU(page));
		SetPageLRU(page);
		BUG_ON(PageActive(page));
		SetPageActive(page);
		add_page_to_active_list(zone, page);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (PagePrivate(page) && !TestSetPageLocked(page)) {
			if (PagePrivate(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup);
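
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): the usual pattern is a gang-lookup loop that processes up to
 * PAGEVEC_SIZE pages at a time and then drops the references with
 * pagevec_release().  Names are hypothetical.
 */
#if 0
static void example_scan_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			next = page->index + 1;
			/* ... inspect or process the page ... */
		}
		pagevec_release(&pvec);	/* drop refs, reinit pvec */
	}
}
#endif	/* sketch */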

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup_tag);

#ifdef CONFIG_SMP
/*
 * We tolerate a little inaccuracy to avoid ping-ponging the counter between
 * CPUs
 */
#define ACCT_THRESHOLD	max(16, NR_CPUS * 2)

static DEFINE_PER_CPU(long, committed_space) = 0;

void vm_acct_memory(long pages)
{
	long *local;

	preempt_disable();
	local = &__get_cpu_var(committed_space);
	*local += pages;
	if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
		atomic_add(*local, &vm_committed_space);
		*local = 0;
	}
	preempt_enable();
}
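
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): overcommit accounting charges pages at map time and uncharges
 * on unmap, via the vm_unacct_memory() wrapper from <linux/swap.h>,
 * which is just vm_acct_memory(-pages).  The name is hypothetical.
 */
#if 0
static void example_charge_mapping(long npages)
{
	vm_acct_memory(npages);		/* charge */
	/* ... if the mapping is torn down or setup fails ... */
	vm_unacct_memory(npages);	/* uncharge */
}
#endif	/* sketch */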

#ifdef CONFIG_HOTPLUG_CPU

/* Drop the CPU's cached committed space back into the central pool. */
static int cpu_swap_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	long *committed;

	committed = &per_cpu(committed_space, (long)hcpu);
	if (action == CPU_DEAD) {
		atomic_add(*committed, &vm_committed_space);
		*committed = 0;
		__lru_add_drain((long)hcpu);
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */

#ifdef CONFIG_SMP
void percpu_counter_mod(struct percpu_counter *fbc, long amount)
{
	long count;
	long *pcount;
	int cpu = get_cpu();

	pcount = per_cpu_ptr(fbc->counters, cpu);
	count = *pcount + amount;
	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	} else {
		*pcount = count;
	}
	put_cpu();
}
EXPORT_SYMBOL(percpu_counter_mod);

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
long percpu_counter_sum(struct percpu_counter *fbc)
{
	long ret;
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
	for_each_cpu(cpu) {
		long *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	spin_unlock(&fbc->lock);
	return ret < 0 ? 0 : ret;
}
EXPORT_SYMBOL(percpu_counter_sum);
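
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): a typical percpu_counter user initialises the counter once,
 * makes cheap per-cpu mods from hot paths, and only pays for
 * percpu_counter_sum() when an exact value is needed.  Names are
 * hypothetical.
 */
#if 0
static struct percpu_counter example_nr_things;

static void example_use_counter(void)
{
	percpu_counter_init(&example_nr_things);
	percpu_counter_mod(&example_nr_things, 1);	/* fast: usually lock-free */
	if (percpu_counter_sum(&example_nr_things) > 0)	/* slow but exact */
		/* ... act on the exact count ... */;
}
#endif	/* sketch */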
#endif

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more
	 */
	hotcpu_notifier(cpu_swap_callback, 0);
}
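
/*
 * Worked example (editorial note, not part of the original file): with
 * 4KB pages PAGE_SHIFT is 12, so "megs" is num_physpages >> 8.  A
 * machine with 8192 physical pages (32MB) gets page_cluster = 3, i.e.
 * swap readahead works in chunks of 1 << 3 = 8 pages, while a sub-16MB
 * machine gets 1 << 2 = 4 pages per cluster.
 */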