/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = compound_trans_head(page);

		if (likely(page != page_head &&
			   get_page_unless_zero(page_head))) {
			unsigned long flags;
			/*
			 * page_head wasn't a dangling pointer but it
			 * may not be a head page anymore by the time
			 * we obtain the lock. That is ok as long as it
			 * can't be freed from under us.
			 */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount ran before us */
				compound_unlock_irqrestore(page_head, flags);
				VM_BUG_ON(PageHead(page_head));
				if (put_page_testzero(page_head))
					__put_single_page(page_head);
			out_put_single:
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on
			 * the compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(page_mapcount(page) <= 0);
			atomic_dec(&page->_mapcount);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			VM_BUG_ON(atomic_read(&page->_count) != 0);
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);
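
/*
 * Illustrative usage sketch: a typical caller pairs put_page() with a
 * reference taken earlier, e.g. by get_page() or get_user_pages().
 * Dropping the last reference frees the page via __put_single_page()
 * or, for compound pages, the compound destructor:
 *
 *	get_page(page);
 *	... use the page ...
 *	put_page(page);		... may free the page
 */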

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
	/*
	 * This takes care of get_page() if run on a tail page
	 * returned by one of the get_user_pages/follow_page variants.
	 * get_user_pages/follow_page itself doesn't need the compound
	 * lock because it runs __get_page_tail_foll() under the
	 * proper PT lock that already serializes against
	 * split_huge_page().
	 */
	unsigned long flags;
	bool got = false;
	struct page *page_head = compound_trans_head(page);

	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		/*
		 * page_head wasn't a dangling pointer but it
		 * may not be a head page anymore by the time
		 * we obtain the lock. That is ok as long as it
		 * can't be freed from under us.
		 */
		flags = compound_lock_irqsave(page_head);
		/* here __split_huge_page_refcount won't run anymore */
		if (likely(PageTail(page))) {
			__get_page_tail_foll(page, false);
			got = true;
		}
		compound_unlock_irqrestore(page_head, flags);
		if (unlikely(!got))
			put_page(page_head);
	}
	return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
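
/*
 * Illustrative usage sketch: a caller such as read_cache_pages()'s error
 * path strings pages on a private list through page->lru and drops them
 * all in one call:
 *
 *	LIST_HEAD(pages);
 *
 *	list_add(&page->lru, &pages);
 *	...
 *	put_pages_list(&pages);
 */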

static void pagevec_lru_move_fn(struct pagevec *pvec,
				void (*move_fn)(struct page *page, void *arg),
				void *arg)
{
	int i;
	struct zone *zone = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		(*move_fn)(page, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, void *arg)
{
	int *pgmoved = arg;
	struct zone *zone = page_zone(page);

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		list_move_tail(&page->lru, &zone->lru[lru].list);
		mem_cgroup_rotate_reclaimable_page(page);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
	struct zone_reclaim_stat *memcg_reclaim_stat;

	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;

	if (!memcg_reclaim_stat)
		return;

	memcg_reclaim_stat->recent_scanned[file]++;
	if (rotated)
		memcg_reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, void *arg)
{
	struct zone *zone = page_zone(page);

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);
		del_page_from_lru_list(zone, page, lru);

		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);

		update_page_reclaim_stat(zone, page, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}

EXPORT_SYMBOL(mark_page_accessed);
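
/*
 * Illustrative sketch of the ladder above: two touches promote an
 * inactive, unreferenced page to the active list:
 *
 *	mark_page_accessed(page);	... inactive,unreferenced -> inactive,referenced
 *	mark_page_accessed(page);	... inactive,referenced -> active,unreferenced
 */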

void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		____pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, e.g. through munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * can be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4 the page is moved to the head of the inactive list because
 * the VM expects it to be written out by flusher threads, which is much
 * more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, void *arg)
{
	int lru, file;
	bool active;
	struct zone *zone = page_zone(page);

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);

	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);
	del_page_from_lru_list(zone, page, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(zone, page, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * Setting PG_reclaim could race with end_page_writeback(),
		 * which can confuse readahead.  But the race window is
		 * _really_ small and it's a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it sat in the pagevec;
		 * move the page to the tail of the inactive list.
		 */
		list_move_tail(&page->lru, &zone->lru[lru].list);
		mem_cgroup_rotate_reclaimable_page(page);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(zone, page, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			____pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages, such as one using
	 * mprotect, deactivating unevictable pages to accelerate reclaim
	 * is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}
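
/*
 * Illustrative usage sketch, in the style of invalidate_mapping_pages():
 * a page whose invalidation failed (e.g. because it was dirty or under
 * writeback) is hinted as a reclaim candidate:
 *
 *	if (!invalidate_inode_page(page))
 *		deactivate_page(page);
 */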

void lru_add_drain(void)
{
	drain_cpu_pagevecs(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
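
/*
 * Usage note: callers that must see every page on the LRU lists before
 * scanning them (e.g. the mlock/munlock and page-migration paths) call
 * lru_add_drain_all() first so that no page is left sitting in a per-cpu
 * pagevec.  It schedules work on each CPU and may sleep, so it needs
 * process context.
 */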

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_free(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_release);

/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct zone *zone,
		       struct page *page, struct page *page_tail)
{
	int active;
	enum lru_list lru;
	const int file = 0;
	struct list_head *head;

	VM_BUG_ON(!PageHead(page));
	VM_BUG_ON(PageCompound(page_tail));
	VM_BUG_ON(PageLRU(page_tail));
	VM_BUG_ON(!spin_is_locked(&zone->lru_lock));

	SetPageLRU(page_tail);

	if (page_evictable(page_tail, NULL)) {
		if (PageActive(page)) {
			SetPageActive(page_tail);
			active = 1;
			lru = LRU_ACTIVE_ANON;
		} else {
			active = 0;
			lru = LRU_INACTIVE_ANON;
		}
		update_page_reclaim_stat(zone, page_tail, file, active);
		if (likely(PageLRU(page)))
			head = page->lru.prev;
		else
			head = &zone->lru[lru].list;
		__add_page_to_lru_list(zone, page_tail, lru, head);
	} else {
		SetPageUnevictable(page_tail);
		add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
	}
}

static void ____pagevec_lru_add_fn(struct page *page, void *arg)
{
	enum lru_list lru = (enum lru_list)arg;
	struct zone *zone = page_zone(page);
	int file = is_file_lru(lru);
	int active = is_active_lru(lru);

	VM_BUG_ON(PageActive(page));
	VM_BUG_ON(PageUnevictable(page));
	VM_BUG_ON(PageLRU(page));

	SetPageLRU(page);
	if (active)
		SetPageActive(page);
	update_page_reclaim_stat(zone, page, file, active);
	add_page_to_lru_list(zone, page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	VM_BUG_ON(is_unevictable_lru(lru));

	pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
}

EXPORT_SYMBOL(____pagevec_lru_add);

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (page_has_private(page) && trylock_page(page)) {
			if (page_has_private(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup);
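
/*
 * Illustrative usage sketch, in the style of mm/truncate.c: walk the
 * pages of a mapping in PAGEVEC_SIZE batches, dropping the references
 * that pagevec_lookup() took:
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *	int i;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *		for (i = 0; i < pagevec_count(&pvec); i++) {
 *			struct page *page = pvec.pages[i];
 *
 *			index = page->index + 1;
 *			... inspect page ...
 *		}
 *		pagevec_release(&pvec);
 *	}
 */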

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup_tag);
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 778 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 779 | /* | 
 | 780 |  * Perform any setup for the swap system | 
 | 781 |  */ | 
 | 782 | void __init swap_setup(void) | 
 | 783 | { | 
| Jan Beulich | 4481374 | 2009-09-21 17:03:05 -0700 | [diff] [blame] | 784 | 	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 785 |  | 
| Peter Zijlstra | e0bf68d | 2007-10-16 23:25:46 -0700 | [diff] [blame] | 786 | #ifdef CONFIG_SWAP | 
 | 787 | 	bdi_init(swapper_space.backing_dev_info); | 
 | 788 | #endif | 
 | 789 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 790 | 	/* Use a smaller cluster for small-memory machines */ | 
 | 791 | 	if (megs < 16) | 
 | 792 | 		page_cluster = 2; | 
 | 793 | 	else | 
 | 794 | 		page_cluster = 3; | 
 | 795 | 	/* | 
 | 796 | 	 * Right now other parts of the system means that we | 
 | 797 | 	 * _really_ don't want to cluster much more | 
 | 798 | 	 */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 799 | } |