/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/*
 * How many pages do we try to swap or page in/out together?
 * page_cluster is the log2 of the swap readahead window (see
 * swapin_readahead()) and is tunable via /proc/sys/vm/page-cluster.
 */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	free_hot_cold_page(page, 0);
}

static void put_compound_page(struct page *page)
{
	page = compound_head(page);
	if (put_page_testzero(page)) {
		compound_page_dtor *dtor;

		dtor = get_compound_page_dtor(page);
		(*dtor)(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__page_cache_release(page);
}
EXPORT_SYMBOL(put_page);

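/*
 * Usage sketch (hypothetical caller, not part of this file): every
 * reference taken via get_page() or find_get_page() must eventually be
 * dropped with put_page(), or its pagecache alias page_cache_release():
 *
 *	struct page *page = find_get_page(mapping, index);
 *	if (page) {
 *		... inspect or copy from the page ...
 *		page_cache_release(page);	(wrapper around put_page())
 *	}
 */
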
/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

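/*
 * Usage sketch (hypothetical): a caller that collected pages on a local
 * list via page->lru can drop its references in one call:
 *
 *	LIST_HEAD(pages);
 *
 *	list_add(&page->lru, &pages);	(repeated for each page)
 *	...
 *	put_pages_list(&pages);		(empties the list, dropping one
 *					 reference per page)
 */
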
/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int i;
	int pgmoved = 0;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock(&zone->lru_lock);
			zone = pagezone;
			spin_lock(&zone->lru_lock);
		}
		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
			int lru = page_lru_base_type(page);
			list_move_tail(&page->lru, &zone->lru[lru].list);
			pgmoved++;
		}
	}
	if (zone)
		spin_unlock(&zone->lru_lock);
	__count_vm_events(PGROTATED, pgmoved);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

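/*
 * Caller sketch: the typical entry point is end_page_writeback()
 * (mm/filemap.c), which hands the page over once reclaim has set
 * PG_reclaim on it:
 *
 *	if (TestClearPageReclaim(page))
 *		rotate_reclaimable_page(page);
 */
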
static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
	struct zone_reclaim_stat *memcg_reclaim_stat;

	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;

	if (!memcg_reclaim_stat)
		return;

	memcg_reclaim_stat->recent_scanned[file]++;
	if (rotated)
		memcg_reclaim_stat->recent_rotated[file]++;
}

/*
 * FIXME: speed this up?
 */
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);
		del_page_from_lru_list(zone, page, lru);

		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);

		update_page_reclaim_stat(zone, page, file, 1);
	}
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}

EXPORT_SYMBOL(mark_page_accessed);

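/*
 * Usage sketch (hypothetical caller): code touching pagecache outside of
 * a page fault should record the reference so reclaim can see it, e.g.
 * in a read path:
 *
 *	page = find_get_page(mapping, index);
 *	... copy data out of the page ...
 *	mark_page_accessed(page);
 *	page_cache_release(page);
 */
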
void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		____pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

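/*
 * Most callers do not pass an lru list directly; they use the inline
 * wrappers in <linux/swap.h>, along the lines of:
 *
 *	static inline void lru_cache_add_anon(struct page *page)
 *	{
 *		__lru_cache_add(page, LRU_INACTIVE_ANON);
 *	}
 */
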
/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			____pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

void lru_add_drain(void)
{
	drain_cpu_pagevecs(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

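/*
 * Usage sketch: paths that must observe every page on the LRU lists
 * (e.g. mlock or page migration) flush the per-cpu pagevecs on all CPUs
 * before scanning:
 *
 *	lru_add_drain_all();
 *	... isolate or scan LRU pages ...
 */
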
/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_free(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_release);

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	int i;
	struct zone *zone = NULL;

	VM_BUG_ON(is_unevictable_lru(lru));

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);
		int file;
		int active;

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(PageUnevictable(page));
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		active = is_active_lru(lru);
		file = is_file_lru(lru);
		if (active)
			SetPageActive(page);
		update_page_reclaim_stat(zone, page, file, active);
		add_page_to_lru_list(zone, page, lru);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(____pagevec_lru_add);

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (page_has_private(page) && trylock_page(page)) {
			if (page_has_private(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup);

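/*
 * Usage sketch (the common iteration pattern, cf. truncate and friends):
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *		int i;
 *
 *		for (i = 0; i < pagevec_count(&pvec); i++) {
 *			struct page *page = pvec.pages[i];
 *
 *			index = page->index + 1;
 *			... process page ...
 *		}
 *		pagevec_release(&pvec);
 *	}
 */
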
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup_tag);

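/*
 * Usage sketch: unlike pagevec_lookup(), this variant advances @index
 * itself (via find_get_pages_tag()), so the caller just loops, e.g.
 * over dirty pages:
 *
 *	pgoff_t index = 0;
 *
 *	while (pagevec_lookup_tag(&pvec, mapping, &index,
 *				  PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) {
 *		... write the pages back ...
 *		pagevec_release(&pvec);
 *	}
 */
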
/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more
	 */
}