/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

/*
 * reclaim_mode determines how the inactive list is shrunk
 * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
 * RECLAIM_MODE_ASYNC:  Do not block
 * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
 * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
 *			order-0 pages and then compact the zone
 */
typedef unsigned __bitwise__ reclaim_mode_t;
#define RECLAIM_MODE_SINGLE		((__force reclaim_mode_t)0x01u)
#define RECLAIM_MODE_ASYNC		((__force reclaim_mode_t)0x02u)
#define RECLAIM_MODE_SYNC		((__force reclaim_mode_t)0x04u)
#define RECLAIM_MODE_COMPACTION		((__force reclaim_mode_t)0x10u)

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	unsigned long hibernation_mode;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can mapped pages be reclaimed? */
	int may_unmap;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	int order;

	/*
	 * Intended to reclaim enough contiguous memory rather than just
	 * enough memory, i.e. the mode for high-order allocations.
	 */
	reclaim_mode_t reclaim_mode;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;
};

struct mem_cgroup_zone {
	struct mem_cgroup *mem_cgroup;
	struct zone *zone;
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
static bool global_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup;
}

static bool scanning_global_lru(struct mem_cgroup_zone *mz)
{
	return !mz->mem_cgroup;
}
#else
static bool global_reclaim(struct scan_control *sc)
{
	return true;
}

static bool scanning_global_lru(struct mem_cgroup_zone *mz)
{
	return true;
}
#endif

static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz)
{
	if (!scanning_global_lru(mz))
		return mem_cgroup_get_reclaim_stat(mz->mem_cgroup, mz->zone);

	return &mz->zone->reclaim_stat;
}

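/*
 * Number of pages on this zone's LRU lists that reclaim could plausibly
 * free: file-backed pages always count, anonymous pages only while swap
 * space is available.
 */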
unsigned long zone_reclaimable_pages(struct zone *zone)
{
	int nr;

	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
	     zone_page_state(zone, NR_INACTIVE_FILE);

	if (get_nr_swap_pages() > 0)
		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
		      zone_page_state(zone, NR_INACTIVE_ANON);

	return nr;
}

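/*
 * A zone counts as reclaimable while zone->pages_scanned remains below
 * six times the number of currently reclaimable pages.
 */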
bool zone_reclaimable(struct zone *zone)
{
	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
}

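/* Number of pages on the given LRU list of this mem_cgroup_zone. */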
static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
				       enum lru_list lru)
{
	if (!scanning_global_lru(mz))
		return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
						    zone_to_nid(mz->zone),
						    zone_idx(mz->zone),
						    BIT(lru));

	return zone_page_state(mz->zone, NR_LRU_BASE + lru);
}

/*
 * Add a shrinker callback to be called from the vm
 */
void register_shrinker(struct shrinker *shrinker)
{
	atomic_long_set(&shrinker->nr_in_batch, 0);
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);

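/*
 * Invoke a shrinker's ->shrink() callback.  A nr_to_scan of 0 only asks
 * the shrinker how many freeable objects it currently has.
 */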
static inline int do_shrinker_shrink(struct shrinker *shrinker,
				     struct shrink_control *sc,
				     unsigned long nr_to_scan)
{
	sc->nr_to_scan = nr_to_scan;
	return (*shrinker->shrink)(shrinker, sc);
}

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (nr_pages_scanned == 0)
		nr_pages_scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem)) {
		/* Assume we'll be able to shrink next time */
		ret = 1;
		goto out;
	}

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		long total_scan;
		long max_pass;
		int shrink_ret = 0;
		long nr;
		long new_nr;
		long batch_size = shrinker->batch ? shrinker->batch
						  : SHRINK_BATCH;

		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
		if (max_pass <= 0)
			continue;

		/*
		 * copy the current shrinker scan count into a local variable
		 * and zero it so that other concurrent shrinker invocations
		 * don't also do this scanning work.
		 */
		nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);

		total_scan = nr;
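		/*
		 * Scan slab objects in proportion to the page reclaim
		 * pressure: 4 * nr_pages_scanned / seeks, scaled by this
		 * cache's size relative to the LRU pages that are eligible
		 * for the allocation.
		 */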
		delta = (4 * nr_pages_scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		total_scan += delta;
		if (total_scan < 0) {
			printk(KERN_ERR "shrink_slab: %pF negative objects to "
			       "delete nr=%ld\n",
			       shrinker->shrink, total_scan);
			total_scan = max_pass;
		}

		/*
		 * We need to avoid excessive windup on filesystem shrinkers
		 * due to large numbers of GFP_NOFS allocations causing the
		 * shrinkers to return -1 all the time. This results in a large
		 * nr being built up so when a shrink that can do some work
		 * comes along it empties the entire cache due to nr >>>
		 * max_pass.  This is bad for sustaining a working set in
		 * memory.
		 *
		 * Hence only allow the shrinker to scan the entire cache when
		 * a large delta change is calculated directly.
		 */
		if (delta < max_pass / 4)
			total_scan = min(total_scan, max_pass / 2);

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (total_scan > max_pass * 2)
			total_scan = max_pass * 2;

		trace_mm_shrink_slab_start(shrinker, shrink, nr,
					nr_pages_scanned, lru_pages,
					max_pass, delta, total_scan);

		while (total_scan >= batch_size) {
			int nr_before;

			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
			shrink_ret = do_shrinker_shrink(shrinker, shrink,
							batch_size);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, batch_size);
			total_scan -= batch_size;

			cond_resched();
		}

		/*
		 * move the unused scan count back into the shrinker in a
		 * manner that handles concurrent updates. If we exhausted the
		 * scan, there is no need to do an update.
		 */
		if (total_scan > 0)
			new_nr = atomic_long_add_return(total_scan,
					&shrinker->nr_in_batch);
		else
			new_nr = atomic_long_read(&shrinker->nr_in_batch);

		trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
	}
	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return ret;
}

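/*
 * Decide how the inactive list will be shrunk for this pass: plain order-0
 * reclaim, or reclaim/compaction for high-order requests, optionally
 * allowing the caller to block on writeback (sync).
 */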
static void set_reclaim_mode(int priority, struct scan_control *sc,
			     bool sync)
{
	/* Sync reclaim used only for compaction */
	reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;

	/*
	 * Restrict reclaim/compaction to costly allocations or when
	 * under memory pressure
	 */
	if (COMPACTION_BUILD && sc->order &&
			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
			 priority < DEF_PRIORITY - 2))
		sc->reclaim_mode = RECLAIM_MODE_COMPACTION | syncmode;
	else
		sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
}

static void reset_reclaim_mode(struct scan_control *sc)
{
	sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
}

static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.
	 */
	return page_count(page) - page_has_private(page) == 2;
}

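/*
 * May this reclaim context queue writeback against @bdi?  Allowed for
 * PF_SWAPWRITE tasks, uncongested queues and the task's own backing device.
 */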
static int may_write_to_queue(struct backing_dev_info *bdi,
			      struct scan_control *sc)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 struct scan_control *sc)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking.  To prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_aio_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info, sc))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page,
			trace_reclaim_flags(page, sc->reclaim_mode));
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_count.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under tree_lock, then this ordering is not required.
	 */
	if (!page_freeze_refs(page, 2))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_unfreeze_refs(page, 2);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		swapcache_free(swap, page);
	} else {
		void (*freepage)(struct page *);

		freepage = mapping->a_ops->freepage;

		__delete_from_page_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		mem_cgroup_uncharge_cache_page(page);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_unfreeze_refs(page, 1);
		return 1;
	}
	return 0;
}

/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	int lru;
	int active = !!TestClearPageActive(page);
	int was_unevictable = PageUnevictable(page);

	VM_BUG_ON(PageLRU(page));

redo:
	ClearPageUnevictable(page);

	if (page_evictable(page, NULL)) {
		/*
		 * For evictable pages, we can use the cache.
		 * In event of a race, worst case is we end up with an
		 * unevictable page on [in]active list.
		 * We know how to handle that.
		 */
		lru = active + page_lru_base_type(page);
		lru_cache_add_lru(page, lru);
	} else {
		/*
		 * Put unevictable pages directly on zone's unevictable
		 * list.
		 */
		lru = LRU_UNEVICTABLE;
		add_page_to_unevictable_list(page);
		/*
		 * When racing with an mlock or AS_UNEVICTABLE clearing
		 * (page is unlocked) make sure that if the other thread
		 * does not observe our setting of PG_lru and fails
		 * isolation/check_move_unevictable_pages,
		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
		 * the page back to the evictable list.
		 *
		 * The other side is TestClearPageMlocked() or shmem_lock().
		 */
		smp_mb();
	}

	/*
	 * page's status can change while we move it among lru. If an evictable
	 * page is on the unevictable list, it will never be freed. To avoid
	 * that, check again after we added it to the list.
	 */
	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
		if (!isolate_lru_page(page)) {
			put_page(page);
			goto redo;
		}
		/* This means someone else dropped this page from LRU
		 * So, it will be freed or putback to LRU again. There is
		 * nothing to do here.
		 */
	}

	if (was_unevictable && lru != LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGRESCUED);
	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGCULLED);

	put_page(page);		/* drop ref from isolate */
}

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

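/*
 * Decide what to do with a page based on its recent references: activate
 * referenced anonymous pages and repeatedly used or executable file pages,
 * give once-referenced file pages another trip around the inactive list,
 * and reclaim everything else.
 */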
static enum page_references page_check_references(struct page *page,
						  struct mem_cgroup_zone *mz,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, mz->mem_cgroup, &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageSwapBacked(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list. Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if (vm_flags & VM_EXEC)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
				      struct mem_cgroup_zone *mz,
				      struct scan_control *sc,
				      int priority,
				      unsigned long *ret_nr_dirty,
				      unsigned long *ret_nr_writeback)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	int pgactivate = 0;
	unsigned long nr_dirty = 0;
	unsigned long nr_congested = 0;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_writeback = 0;

	cond_resched();

	while (!list_empty(page_list)) {
		enum page_references references;
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(page_zone(page) != mz->zone);

		sc->nr_scanned++;

		if (unlikely(!page_evictable(page, NULL)))
			goto cull_mlocked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			nr_writeback++;
			/*
			 * Synchronous reclaim cannot queue pages for
			 * writeback due to the possibility of stack overflow
			 * but if it encounters a page under writeback, wait
			 * for the IO to complete.
			 */
			if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
			    may_enter_fs)
				wait_on_page_writeback(page);
			else {
				unlock_page(page);
				goto keep_reclaim_mode;
			}
		}

		references = page_check_references(page, mz, sc);
		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!(sc->gfp_mask & __GFP_IO))
				goto keep_locked;
			if (!add_to_swap(page))
				goto activate_locked;
			may_enter_fs = 1;
		}

		mapping = page_mapping(page);

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, TTU_UNMAP)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_MLOCK:
				goto cull_mlocked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			nr_dirty++;

			/*
			 * Only kswapd can writeback filesystem pages to
			 * avoid risk of stack overflow but do not writeback
			 * unless under significant pressure.
			 */
			if (page_is_file_cache(page) &&
			    (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty
				 */
				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto keep_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sc)) {
			case PAGE_KEEP:
				nr_congested++;
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page))
					goto keep_reclaim_mode;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (!mapping || !__remove_mapping(mapping, page))
			goto keep_locked;

		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache). Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references.
		 */
		__clear_page_locked(page);
free_it:
		nr_reclaimed++;

		/*
		 * Is there need to periodically free_page_list? It would
		 * appear not as the counts should be low
		 */
		list_add(&page->lru, &free_pages);
		continue;

cull_mlocked:
		if (PageSwapCache(page))
			try_to_free_swap(page);
		unlock_page(page);
		putback_lru_page(page);
		reset_reclaim_mode(sc);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && vm_swap_full())
			try_to_free_swap(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		reset_reclaim_mode(sc);
keep_reclaim_mode:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
	}

	/*
	 * Tag a zone as congested if all the dirty pages encountered were
	 * backed by a congested BDI. In this case, reclaimers should just
	 * back off and wait for congestion to clear because further reclaim
	 * will encounter the same problem
	 */
	if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
		zone_set_flag(mz->zone, ZONE_CONGESTED);

	free_hot_cold_page_list(&free_pages, 1);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);
	*ret_nr_dirty += nr_dirty;
	*ret_nr_writeback += nr_writeback;
	return nr_reclaimed;
}

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
Minchan Kim4356f212011-10-31 17:06:47 -07001042int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07001043{
Minchan Kim4356f212011-10-31 17:06:47 -07001044 bool all_lru_mode;
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07001045 int ret = -EINVAL;
1046
1047 /* Only take pages on the LRU. */
1048 if (!PageLRU(page))
1049 return ret;
1050
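	/*
	 * all_lru_mode means the caller asked for both active and inactive
	 * pages, so the active/file checks below are skipped.
	 */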
	all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
		(ISOLATE_ACTIVE|ISOLATE_INACTIVE);

	/*
	 * When checking the active state, we need to be sure we are
	 * dealing with comparable boolean values.  Take the logical not
	 * of each.
	 */
	if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
		return ret;

	if (!all_lru_mode && !!page_is_file_cache(page) != file)
		return ret;

	/* Do not give back unevictable pages for compaction */
	if (PageUnevictable(page))
		return ret;

	ret = -EBUSY;

	/*
	 * To minimise LRU disruption, the caller can indicate that it only
	 * wants to isolate pages it will be able to operate on without
	 * blocking - clean pages for the most part.
	 *
	 * ISOLATE_CLEAN means that only clean pages should be isolated. This
	 * is used by reclaim when it cannot write to backing storage.
	 *
	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants
	 * pages that are possible to migrate without blocking.
	 */
	if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
		/* All the caller can do on PageWriteback is block */
		if (PageWriteback(page))
			return ret;

		if (PageDirty(page)) {
			struct address_space *mapping;

			/* ISOLATE_CLEAN means only clean pages */
			if (mode & ISOLATE_CLEAN)
				return ret;

			/*
			 * Only pages without mappings or that have a
			 * ->migratepage callback are possible to migrate
			 * without blocking
			 */
			mapping = page_mapping(page);
			if (mapping && !mapping->a_ops->migratepage)
				return ret;
		}
	}

	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
		return ret;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @mz:		The mem_cgroup_zone to pull pages from.
 * @dst:	The temp list to put pages on to.
 * @nr_scanned:	The number of pages that were scanned.
 * @sc:		The scan_control struct for this reclaim session
 * @mode:	One of the LRU isolation modes
 * @lru:	LRU list id for isolating
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct mem_cgroup_zone *mz, struct list_head *dst,
		unsigned long *nr_scanned, struct scan_control *sc,
		isolate_mode_t mode, enum lru_list lru)
{
	struct lruvec *lruvec;
	struct list_head *src;
	unsigned long nr_taken = 0;
	unsigned long scan;
	int file = is_file_lru(lru);

	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
	src = &lruvec->lists[lru];

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct page *page;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		switch (__isolate_lru_page(page, mode, file)) {
		case 0:
			mem_cgroup_lru_del(page);
			list_move(&page->lru, dst);
			nr_taken += hpage_nr_pages(page);
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			continue;

		default:
			BUG();
		}
	}

	*nr_scanned = scan;

	trace_mm_vmscan_lru_isolate(sc->order,
			nr_to_scan, scan,
			nr_taken,
			mode, file);
	return nr_taken;
}

/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
 * vmstat statistic corresponding to whatever LRU list the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared.  If it was found on
 * the active list, it will have PageActive set.  If it was found on
 * the unevictable list, it will have the PageUnevictable bit set. That flag
 * may need to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page. This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
1214int isolate_lru_page(struct page *page)
1215{
1216 int ret = -EBUSY;
1217
Konstantin Khlebnikov0c917312011-05-24 17:12:21 -07001218 VM_BUG_ON(!page_count(page));
1219
Nick Piggin62695a82008-10-18 20:26:09 -07001220 if (PageLRU(page)) {
1221 struct zone *zone = page_zone(page);
1222
1223 spin_lock_irq(&zone->lru_lock);
Konstantin Khlebnikov0c917312011-05-24 17:12:21 -07001224 if (PageLRU(page)) {
Lee Schermerhorn894bc312008-10-18 20:26:39 -07001225 int lru = page_lru(page);
Nick Piggin62695a82008-10-18 20:26:09 -07001226 ret = 0;
Konstantin Khlebnikov0c917312011-05-24 17:12:21 -07001227 get_page(page);
Nick Piggin62695a82008-10-18 20:26:09 -07001228 ClearPageLRU(page);
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001229
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001230 del_page_from_lru_list(zone, page, lru);
Nick Piggin62695a82008-10-18 20:26:09 -07001231 }
1232 spin_unlock_irq(&zone->lru_lock);
1233 }
1234 return ret;
1235}
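/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller, such as page migration code, already holds its own reference
 * before isolating and drops it afterwards, roughly:
 *
 *	get_page(page);
 *	if (!isolate_lru_page(page))
 *		list_add_tail(&page->lru, &private_list);
 *	put_page(page);
 *
 * The private_list name is hypothetical; the point is only that the
 * reference taken by isolate_lru_page() keeps the page alive while it
 * sits on the caller's private list.
 */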
1236
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07001237/*
Rik van Riel35cd7812009-09-21 17:01:38 -07001238 * Are there way too many processes in the direct reclaim path already?
1239 */
1240static int too_many_isolated(struct zone *zone, int file,
1241 struct scan_control *sc)
1242{
1243 unsigned long inactive, isolated;
1244
1245 if (current_is_kswapd())
1246 return 0;
1247
Johannes Weiner89b5fae2012-01-12 17:17:50 -08001248 if (!global_reclaim(sc))
Rik van Riel35cd7812009-09-21 17:01:38 -07001249 return 0;
1250
1251 if (file) {
1252 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1253 isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1254 } else {
1255 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1256 isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1257 }
1258
1259 return isolated > inactive;
1260}
1261
Mel Gorman66635622010-08-09 17:19:30 -07001262static noinline_for_stack void
Hugh Dickins3f797682012-01-12 17:20:07 -08001263putback_inactive_pages(struct mem_cgroup_zone *mz,
1264 struct list_head *page_list)
Mel Gorman66635622010-08-09 17:19:30 -07001265{
Johannes Weinerf16015f2012-01-12 17:17:52 -08001266 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
Hugh Dickins3f797682012-01-12 17:20:07 -08001267 struct zone *zone = mz->zone;
1268 LIST_HEAD(pages_to_free);
Mel Gorman66635622010-08-09 17:19:30 -07001269
Mel Gorman66635622010-08-09 17:19:30 -07001270 /*
1271 * Put back any unfreeable pages.
1272 */
Mel Gorman66635622010-08-09 17:19:30 -07001273 while (!list_empty(page_list)) {
Hugh Dickins3f797682012-01-12 17:20:07 -08001274 struct page *page = lru_to_page(page_list);
Mel Gorman66635622010-08-09 17:19:30 -07001275 int lru;
Hugh Dickins3f797682012-01-12 17:20:07 -08001276
Mel Gorman66635622010-08-09 17:19:30 -07001277 VM_BUG_ON(PageLRU(page));
1278 list_del(&page->lru);
1279 if (unlikely(!page_evictable(page, NULL))) {
1280 spin_unlock_irq(&zone->lru_lock);
1281 putback_lru_page(page);
1282 spin_lock_irq(&zone->lru_lock);
1283 continue;
1284 }
Linus Torvalds7a608572011-01-17 14:42:19 -08001285 SetPageLRU(page);
Mel Gorman66635622010-08-09 17:19:30 -07001286 lru = page_lru(page);
Linus Torvalds7a608572011-01-17 14:42:19 -08001287 add_page_to_lru_list(zone, page, lru);
Mel Gorman66635622010-08-09 17:19:30 -07001288 if (is_active_lru(lru)) {
1289 int file = is_file_lru(lru);
Rik van Riel9992af12011-01-13 15:47:13 -08001290 int numpages = hpage_nr_pages(page);
1291 reclaim_stat->recent_rotated[file] += numpages;
Mel Gorman66635622010-08-09 17:19:30 -07001292 }
Hugh Dickins2bcf8872012-01-12 17:19:56 -08001293 if (put_page_testzero(page)) {
1294 __ClearPageLRU(page);
1295 __ClearPageActive(page);
1296 del_page_from_lru_list(zone, page, lru);
1297
1298 if (unlikely(PageCompound(page))) {
1299 spin_unlock_irq(&zone->lru_lock);
1300 (*get_compound_page_dtor(page))(page);
1301 spin_lock_irq(&zone->lru_lock);
1302 } else
1303 list_add(&page->lru, &pages_to_free);
Mel Gorman66635622010-08-09 17:19:30 -07001304 }
1305 }
Mel Gorman66635622010-08-09 17:19:30 -07001306
Hugh Dickins3f797682012-01-12 17:20:07 -08001307 /*
1308 * To save our caller's stack, now use input list for pages to free.
1309 */
1310 list_splice(&pages_to_free, page_list);
Mel Gorman66635622010-08-09 17:19:30 -07001311}
1312
Johannes Weinerf16015f2012-01-12 17:17:52 -08001313static noinline_for_stack void
1314update_isolated_counts(struct mem_cgroup_zone *mz,
Hugh Dickins3f797682012-01-12 17:20:07 -08001315 struct list_head *page_list,
Johannes Weinerf16015f2012-01-12 17:17:52 -08001316 unsigned long *nr_anon,
Hugh Dickins3f797682012-01-12 17:20:07 -08001317 unsigned long *nr_file)
Mel Gorman1489fa12010-08-09 17:19:33 -07001318{
Johannes Weinerf16015f2012-01-12 17:17:52 -08001319 struct zone *zone = mz->zone;
Mel Gorman1489fa12010-08-09 17:19:33 -07001320 unsigned int count[NR_LRU_LISTS] = { 0, };
Hugh Dickins3f797682012-01-12 17:20:07 -08001321 unsigned long nr_active = 0;
1322 struct page *page;
1323 int lru;
Mel Gorman1489fa12010-08-09 17:19:33 -07001324
Hugh Dickins3f797682012-01-12 17:20:07 -08001325 /*
1326 * Count pages and clear active flags
1327 */
1328 list_for_each_entry(page, page_list, lru) {
1329 int numpages = hpage_nr_pages(page);
1330 lru = page_lru_base_type(page);
1331 if (PageActive(page)) {
1332 lru += LRU_ACTIVE;
1333 ClearPageActive(page);
1334 nr_active += numpages;
1335 }
1336 count[lru] += numpages;
1337 }
1338
Hillf Dantond563c052012-03-21 16:34:02 -07001339 preempt_disable();
Mel Gorman1489fa12010-08-09 17:19:33 -07001340 __count_vm_events(PGDEACTIVATE, nr_active);
1341
1342 __mod_zone_page_state(zone, NR_ACTIVE_FILE,
1343 -count[LRU_ACTIVE_FILE]);
1344 __mod_zone_page_state(zone, NR_INACTIVE_FILE,
1345 -count[LRU_INACTIVE_FILE]);
1346 __mod_zone_page_state(zone, NR_ACTIVE_ANON,
1347 -count[LRU_ACTIVE_ANON]);
1348 __mod_zone_page_state(zone, NR_INACTIVE_ANON,
1349 -count[LRU_INACTIVE_ANON]);
1350
1351 *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
1352 *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
Mel Gorman1489fa12010-08-09 17:19:33 -07001353
Hillf Dantond563c052012-03-21 16:34:02 -07001354 __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
1355 __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
1356 preempt_enable();
Mel Gorman1489fa12010-08-09 17:19:33 -07001357}
1358
Mel Gorman66635622010-08-09 17:19:30 -07001359/*
Mel Gormana18bba02011-10-31 17:07:42 -07001360 * Returns true if a direct reclaim should wait on pages under writeback.
Wu Fengguange31f36982010-08-09 17:20:01 -07001361 *
1362 * If we are direct reclaiming for contiguous pages and we do not reclaim
1363 * everything in the list, try again and wait for writeback IO to complete.
 1364 * This will stall high-order allocations noticeably. Only do that when we
 1365 * really need to free the pages under high memory pressure.
1366 */
1367static inline bool should_reclaim_stall(unsigned long nr_taken,
1368 unsigned long nr_freed,
1369 int priority,
1370 struct scan_control *sc)
1371{
Mel Gorman2c275242012-05-29 15:06:19 -07001372 int stall_priority;
Wu Fengguange31f36982010-08-09 17:20:01 -07001373
1374 /* kswapd should not stall on sync IO */
1375 if (current_is_kswapd())
1376 return false;
1377
Mel Gorman2c275242012-05-29 15:06:19 -07001378 /* Only stall for memory compaction */
Mel Gormanf3a310b2011-01-13 15:46:00 -08001379 if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
Wu Fengguange31f36982010-08-09 17:20:01 -07001380 return false;
1381
Justin P. Mattock81d66c72011-08-23 09:28:02 -07001382 /* If we have reclaimed everything on the isolated list, no stall */
Wu Fengguange31f36982010-08-09 17:20:01 -07001383 if (nr_freed == nr_taken)
1384 return false;
1385
1386 /*
1387 * For high-order allocations, there are two stall thresholds.
 1388 * High-cost allocations stall immediately whereas lower
1389 * order allocations such as stacks require the scanning
1390 * priority to be much higher before stalling.
1391 */
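	/*
	 * Worked example (illustrative, assuming DEF_PRIORITY is 12):
	 * a request above PAGE_ALLOC_COSTLY_ORDER may stall at any scan
	 * priority, while a lower-order request only stalls once the
	 * priority has dropped to DEF_PRIORITY / 3 = 4 or below.
	 */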
1392 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
Mel Gorman2c275242012-05-29 15:06:19 -07001393 stall_priority = DEF_PRIORITY;
Wu Fengguange31f36982010-08-09 17:20:01 -07001394 else
Mel Gorman2c275242012-05-29 15:06:19 -07001395 stall_priority = DEF_PRIORITY / 3;
Wu Fengguange31f36982010-08-09 17:20:01 -07001396
Mel Gorman2c275242012-05-29 15:06:19 -07001397 return priority <= stall_priority;
Wu Fengguange31f36982010-08-09 17:20:01 -07001398}
1399
1400/*
Andrew Morton1742f192006-03-22 00:08:21 -08001401 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
1402 * of reclaimed pages
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 */
Mel Gorman66635622010-08-09 17:19:30 -07001404static noinline_for_stack unsigned long
Johannes Weinerf16015f2012-01-12 17:17:52 -08001405shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
Konstantin Khlebnikov09f85952012-05-29 15:06:53 -07001406 struct scan_control *sc, int priority, enum lru_list lru)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407{
1408 LIST_HEAD(page_list);
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001409 unsigned long nr_scanned;
Andrew Morton05ff5132006-03-22 00:08:20 -08001410 unsigned long nr_reclaimed = 0;
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001411 unsigned long nr_taken;
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001412 unsigned long nr_anon;
1413 unsigned long nr_file;
Mel Gorman92df3a72011-10-31 17:07:56 -07001414 unsigned long nr_dirty = 0;
1415 unsigned long nr_writeback = 0;
Hillf Danton61317282012-03-21 16:33:48 -07001416 isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
Konstantin Khlebnikov09f85952012-05-29 15:06:53 -07001417 int file = is_file_lru(lru);
Johannes Weinerf16015f2012-01-12 17:17:52 -08001418 struct zone *zone = mz->zone;
Hillf Dantond563c052012-03-21 16:34:02 -07001419 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
KOSAKI Motohiro78dc5832009-06-16 15:31:40 -07001420
Rik van Riel35cd7812009-09-21 17:01:38 -07001421 while (unlikely(too_many_isolated(zone, file, sc))) {
KOSAKI Motohiro58355c72009-10-26 16:49:35 -07001422 congestion_wait(BLK_RW_ASYNC, HZ/10);
Rik van Riel35cd7812009-09-21 17:01:38 -07001423
1424 /* We are about to die and free our memory. Return now. */
1425 if (fatal_signal_pending(current))
1426 return SWAP_CLUSTER_MAX;
1427 }
1428
Mel Gormanf3a310b2011-01-13 15:46:00 -08001429 set_reclaim_mode(priority, sc, false);
Minchan Kim4356f212011-10-31 17:06:47 -07001430
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431 lru_add_drain();
Minchan Kimf80c0672011-10-31 17:06:55 -07001432
1433 if (!sc->may_unmap)
Hillf Danton61317282012-03-21 16:33:48 -07001434 isolate_mode |= ISOLATE_UNMAPPED;
Minchan Kimf80c0672011-10-31 17:06:55 -07001435 if (!sc->may_writepage)
Hillf Danton61317282012-03-21 16:33:48 -07001436 isolate_mode |= ISOLATE_CLEAN;
Minchan Kimf80c0672011-10-31 17:06:55 -07001437
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 spin_lock_irq(&zone->lru_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439
Rik van Rielfe2c2a12012-03-21 16:33:51 -07001440 nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned,
Konstantin Khlebnikov09f85952012-05-29 15:06:53 -07001441 sc, isolate_mode, lru);
Johannes Weiner89b5fae2012-01-12 17:17:50 -08001442 if (global_reclaim(sc)) {
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001443 zone->pages_scanned += nr_scanned;
KOSAKI Motohirob35ea172009-09-21 17:01:36 -07001444 if (current_is_kswapd())
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001445 __count_zone_vm_events(PGSCAN_KSWAPD, zone,
1446 nr_scanned);
1447 else
1448 __count_zone_vm_events(PGSCAN_DIRECT, zone,
1449 nr_scanned);
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001450 }
Hillf Dantond563c052012-03-21 16:34:02 -07001451 spin_unlock_irq(&zone->lru_lock);
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07001452
Hillf Dantond563c052012-03-21 16:34:02 -07001453 if (nr_taken == 0)
Mel Gorman66635622010-08-09 17:19:30 -07001454 return 0;
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001455
Hugh Dickins3f797682012-01-12 17:20:07 -08001456 update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
1457
Johannes Weinerf16015f2012-01-12 17:17:52 -08001458 nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
Mel Gorman92df3a72011-10-31 17:07:56 -07001459 &nr_dirty, &nr_writeback);
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001460
Wu Fengguange31f36982010-08-09 17:20:01 -07001461 /* Check if we should synchronously wait for writeback */
1462 if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
Mel Gormanf3a310b2011-01-13 15:46:00 -08001463 set_reclaim_mode(priority, sc, true);
Johannes Weinerf16015f2012-01-12 17:17:52 -08001464 nr_reclaimed += shrink_page_list(&page_list, mz, sc,
Mel Gorman92df3a72011-10-31 17:07:56 -07001465 priority, &nr_dirty, &nr_writeback);
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001466 }
1467
Hugh Dickins3f797682012-01-12 17:20:07 -08001468 spin_lock_irq(&zone->lru_lock);
1469
Hillf Dantond563c052012-03-21 16:34:02 -07001470 reclaim_stat->recent_scanned[0] += nr_anon;
1471 reclaim_stat->recent_scanned[1] += nr_file;
1472
Ying Han904249a2012-04-25 16:01:48 -07001473 if (global_reclaim(sc)) {
1474 if (current_is_kswapd())
1475 __count_zone_vm_events(PGSTEAL_KSWAPD, zone,
1476 nr_reclaimed);
1477 else
1478 __count_zone_vm_events(PGSTEAL_DIRECT, zone,
1479 nr_reclaimed);
1480 }
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001481
Hugh Dickins3f797682012-01-12 17:20:07 -08001482 putback_inactive_pages(mz, &page_list);
1483
1484 __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
1485 __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
1486
1487 spin_unlock_irq(&zone->lru_lock);
1488
1489 free_hot_cold_page_list(&page_list, 1);
Mel Gormane11da5b2010-10-26 14:21:40 -07001490
Mel Gorman92df3a72011-10-31 17:07:56 -07001491 /*
1492 * If reclaim is isolating dirty pages under writeback, it implies
1493 * that the long-lived page allocation rate is exceeding the page
1494 * laundering rate. Either the global limits are not being effective
1495 * at throttling processes due to the page distribution throughout
1496 * zones or there is heavy usage of a slow backing device. The
1497 * only option is to throttle from reclaim context which is not ideal
1498 * as there is no guarantee the dirtying process is throttled in the
1499 * same way balance_dirty_pages() manages.
1500 *
1501 * This scales the number of dirty pages that must be under writeback
1502 * before throttling depending on priority. It is a simple backoff
1503 * function that has the most effect in the range DEF_PRIORITY to
 1504 * DEF_PRIORITY-2, which is the range of priorities at which reclaim
 1505 * is considered to be in trouble.
1506 *
1507 * DEF_PRIORITY 100% isolated pages must be PageWriteback to throttle
1508 * DEF_PRIORITY-1 50% must be PageWriteback
1509 * DEF_PRIORITY-2 25% must be PageWriteback, kswapd in trouble
1510 * ...
1511 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
1512 * isolated page is PageWriteback
1513 */
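	/*
	 * Illustrative numbers (assuming DEF_PRIORITY is 12): having
	 * isolated SWAP_CLUSTER_MAX (32) pages at priority DEF_PRIORITY-2,
	 * the threshold below is 32 >> 2 = 8, i.e. we throttle once a
	 * quarter of the isolated pages are still under writeback.
	 */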
1514 if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
1515 wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
1516
Mel Gormane11da5b2010-10-26 14:21:40 -07001517 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1518 zone_idx(zone),
1519 nr_scanned, nr_reclaimed,
1520 priority,
Mel Gormanf3a310b2011-01-13 15:46:00 -08001521 trace_shrink_flags(file, sc->reclaim_mode));
Andrew Morton05ff5132006-03-22 00:08:20 -08001522 return nr_reclaimed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523}
1524
Martin Bligh3bb1a852006-10-28 10:38:24 -07001525/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 * This moves pages from the active list to the inactive list.
1527 *
1528 * We move them the other way if the page is referenced by one or more
1529 * processes, from rmap.
1530 *
1531 * If the pages are mostly unmapped, the processing is fast and it is
1532 * appropriate to hold zone->lru_lock across the whole operation. But if
1533 * the pages are mapped, the processing is slow (page_referenced()) so we
1534 * should drop zone->lru_lock around each page. It's impossible to balance
1535 * this, so instead we remove the pages from the LRU while processing them.
1536 * It is safe to rely on PG_active against the non-LRU pages in here because
1537 * nobody will play with that bit on a non-LRU page.
1538 *
1539 * The downside is that we have to touch page->_count against each page.
1540 * But we had to alter page->flags anyway.
1541 */
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08001542
Wu Fengguang3eb41402009-06-16 15:33:13 -07001543static void move_active_pages_to_lru(struct zone *zone,
1544 struct list_head *list,
Hugh Dickins2bcf8872012-01-12 17:19:56 -08001545 struct list_head *pages_to_free,
Wu Fengguang3eb41402009-06-16 15:33:13 -07001546 enum lru_list lru)
1547{
1548 unsigned long pgmoved = 0;
Wu Fengguang3eb41402009-06-16 15:33:13 -07001549 struct page *page;
1550
Wu Fengguang3eb41402009-06-16 15:33:13 -07001551 while (!list_empty(list)) {
Johannes Weiner925b7672012-01-12 17:18:15 -08001552 struct lruvec *lruvec;
1553
Wu Fengguang3eb41402009-06-16 15:33:13 -07001554 page = lru_to_page(list);
Wu Fengguang3eb41402009-06-16 15:33:13 -07001555
1556 VM_BUG_ON(PageLRU(page));
1557 SetPageLRU(page);
1558
Johannes Weiner925b7672012-01-12 17:18:15 -08001559 lruvec = mem_cgroup_lru_add_list(zone, page, lru);
1560 list_move(&page->lru, &lruvec->lists[lru]);
Rik van Riel2c888cf2011-01-13 15:47:13 -08001561 pgmoved += hpage_nr_pages(page);
Wu Fengguang3eb41402009-06-16 15:33:13 -07001562
Hugh Dickins2bcf8872012-01-12 17:19:56 -08001563 if (put_page_testzero(page)) {
1564 __ClearPageLRU(page);
1565 __ClearPageActive(page);
1566 del_page_from_lru_list(zone, page, lru);
1567
1568 if (unlikely(PageCompound(page))) {
1569 spin_unlock_irq(&zone->lru_lock);
1570 (*get_compound_page_dtor(page))(page);
1571 spin_lock_irq(&zone->lru_lock);
1572 } else
1573 list_add(&page->lru, pages_to_free);
Wu Fengguang3eb41402009-06-16 15:33:13 -07001574 }
1575 }
1576 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1577 if (!is_active_lru(lru))
1578 __count_vm_events(PGDEACTIVATE, pgmoved);
1579}
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08001580
Hugh Dickinsf6260122012-01-12 17:20:06 -08001581static void shrink_active_list(unsigned long nr_to_scan,
Johannes Weinerf16015f2012-01-12 17:17:52 -08001582 struct mem_cgroup_zone *mz,
1583 struct scan_control *sc,
Konstantin Khlebnikov09f85952012-05-29 15:06:53 -07001584 int priority, enum lru_list lru)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585{
KOSAKI Motohiro44c241f2009-09-21 17:01:35 -07001586 unsigned long nr_taken;
Hugh Dickinsf6260122012-01-12 17:20:06 -08001587 unsigned long nr_scanned;
Wu Fengguang6fe6b7e2009-06-16 15:33:05 -07001588 unsigned long vm_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 LIST_HEAD(l_hold); /* The pages which were snipped off */
Wu Fengguang8cab4752009-06-16 15:33:12 -07001590 LIST_HEAD(l_active);
Christoph Lameterb69408e2008-10-18 20:26:14 -07001591 LIST_HEAD(l_inactive);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 struct page *page;
Johannes Weinerf16015f2012-01-12 17:17:52 -08001593 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
KOSAKI Motohiro44c241f2009-09-21 17:01:35 -07001594 unsigned long nr_rotated = 0;
Hillf Danton61317282012-03-21 16:33:48 -07001595 isolate_mode_t isolate_mode = ISOLATE_ACTIVE;
Konstantin Khlebnikov09f85952012-05-29 15:06:53 -07001596 int file = is_file_lru(lru);
Johannes Weinerf16015f2012-01-12 17:17:52 -08001597 struct zone *zone = mz->zone;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
1599 lru_add_drain();
Minchan Kimf80c0672011-10-31 17:06:55 -07001600
Konstantin Khlebnikov1480de02012-03-21 16:34:17 -07001601 reset_reclaim_mode(sc);
1602
Minchan Kimf80c0672011-10-31 17:06:55 -07001603 if (!sc->may_unmap)
Hillf Danton61317282012-03-21 16:33:48 -07001604 isolate_mode |= ISOLATE_UNMAPPED;
Minchan Kimf80c0672011-10-31 17:06:55 -07001605 if (!sc->may_writepage)
Hillf Danton61317282012-03-21 16:33:48 -07001606 isolate_mode |= ISOLATE_CLEAN;
Minchan Kimf80c0672011-10-31 17:06:55 -07001607
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 spin_lock_irq(&zone->lru_lock);
Johannes Weiner925b7672012-01-12 17:18:15 -08001609
Rik van Rielfe2c2a12012-03-21 16:33:51 -07001610 nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc,
Konstantin Khlebnikov09f85952012-05-29 15:06:53 -07001611 isolate_mode, lru);
Johannes Weiner89b5fae2012-01-12 17:17:50 -08001612 if (global_reclaim(sc))
Hugh Dickinsf6260122012-01-12 17:20:06 -08001613 zone->pages_scanned += nr_scanned;
Johannes Weiner89b5fae2012-01-12 17:17:50 -08001614
Johannes Weinerb7c46d12009-09-21 17:02:56 -07001615 reclaim_stat->recent_scanned[file] += nr_taken;
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08001616
Hugh Dickinsf6260122012-01-12 17:20:06 -08001617 __count_zone_vm_events(PGREFILL, zone, nr_scanned);
Konstantin Khlebnikov09f85952012-05-29 15:06:53 -07001618 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07001619 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 spin_unlock_irq(&zone->lru_lock);
1621
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 while (!list_empty(&l_hold)) {
1623 cond_resched();
1624 page = lru_to_page(&l_hold);
1625 list_del(&page->lru);
Rik van Riel7e9cd482008-10-18 20:26:35 -07001626
Lee Schermerhorn894bc312008-10-18 20:26:39 -07001627 if (unlikely(!page_evictable(page, NULL))) {
1628 putback_lru_page(page);
1629 continue;
1630 }
1631
Mel Gormancc715d92012-03-21 16:34:00 -07001632 if (unlikely(buffer_heads_over_limit)) {
1633 if (page_has_private(page) && trylock_page(page)) {
1634 if (page_has_private(page))
1635 try_to_release_page(page, 0);
1636 unlock_page(page);
1637 }
1638 }
1639
Johannes Weinerf16015f2012-01-12 17:17:52 -08001640 if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
Rik van Riel9992af12011-01-13 15:47:13 -08001641 nr_rotated += hpage_nr_pages(page);
Wu Fengguang8cab4752009-06-16 15:33:12 -07001642 /*
1643 * Identify referenced, file-backed active pages and
 1644 * give them one more trip around the active list, so
 1645 * that executable code gets a better chance to stay in
 1646 * memory under moderate memory pressure. Anon pages
1647 * are not likely to be evicted by use-once streaming
1648 * IO, plus JVM can create lots of anon VM_EXEC pages,
1649 * so we ignore them here.
1650 */
Wu Fengguang41e20982009-10-26 16:49:53 -07001651 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
Wu Fengguang8cab4752009-06-16 15:33:12 -07001652 list_add(&page->lru, &l_active);
1653 continue;
1654 }
1655 }
Rik van Riel7e9cd482008-10-18 20:26:35 -07001656
KOSAKI Motohiro5205e562009-09-21 17:01:44 -07001657 ClearPageActive(page); /* we are de-activating */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 list_add(&page->lru, &l_inactive);
1659 }
1660
Andrew Mortonb5557492009-01-06 14:40:13 -08001661 /*
Wu Fengguang8cab4752009-06-16 15:33:12 -07001662 * Move pages back to the lru list.
Andrew Mortonb5557492009-01-06 14:40:13 -08001663 */
Johannes Weiner2a1dc502008-12-01 03:00:35 +01001664 spin_lock_irq(&zone->lru_lock);
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001665 /*
Wu Fengguang8cab4752009-06-16 15:33:12 -07001666 * Count referenced pages from currently used mappings as rotated,
1667 * even though only some of them are actually re-activated. This
1668 * helps balance scan pressure between file and anonymous pages in
 1669 * get_scan_count().
Rik van Riel7e9cd482008-10-18 20:26:35 -07001670 */
Johannes Weinerb7c46d12009-09-21 17:02:56 -07001671 reclaim_stat->recent_rotated[file] += nr_rotated;
Rik van Riel556adec2008-10-18 20:26:34 -07001672
Konstantin Khlebnikov09f85952012-05-29 15:06:53 -07001673 move_active_pages_to_lru(zone, &l_active, &l_hold, lru);
1674 move_active_pages_to_lru(zone, &l_inactive, &l_hold, lru - LRU_ACTIVE);
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07001675 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
Christoph Lameterf8891e52006-06-30 01:55:45 -07001676 spin_unlock_irq(&zone->lru_lock);
Hugh Dickins2bcf8872012-01-12 17:19:56 -08001677
1678 free_hot_cold_page_list(&l_hold, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679}
1680
Minchan Kim74e3f3c2010-10-26 14:21:31 -07001681#ifdef CONFIG_SWAP
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001682static int inactive_anon_is_low_global(struct zone *zone)
KOSAKI Motohirof89eb902009-01-07 18:08:14 -08001683{
1684 unsigned long active, inactive;
1685
1686 active = zone_page_state(zone, NR_ACTIVE_ANON);
1687 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1688
1689 if (inactive * zone->inactive_ratio < active)
1690 return 1;
1691
1692 return 0;
1693}
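/*
 * Note (illustrative, not from the original source): zone->inactive_ratio
 * is derived from the zone size elsewhere in mm; on a zone of roughly 1GB
 * it is about 3, so deactivation of anon pages starts once the active anon
 * list grows to about three times the size of the inactive one.
 */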
1694
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001695/**
1696 * inactive_anon_is_low - check if anonymous pages need to be deactivated
 1697 * @mz: memory cgroup and zone to check
1699 *
1700 * Returns true if the zone does not have enough inactive anon pages,
1701 * meaning some active anon pages need to be deactivated.
1702 */
Johannes Weinerf16015f2012-01-12 17:17:52 -08001703static int inactive_anon_is_low(struct mem_cgroup_zone *mz)
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001704{
Minchan Kim74e3f3c2010-10-26 14:21:31 -07001705 /*
1706 * If we don't have swap space, anonymous page deactivation
1707 * is pointless.
1708 */
1709 if (!total_swap_pages)
1710 return 0;
1711
Johannes Weinerf16015f2012-01-12 17:17:52 -08001712 if (!scanning_global_lru(mz))
1713 return mem_cgroup_inactive_anon_is_low(mz->mem_cgroup,
1714 mz->zone);
1715
1716 return inactive_anon_is_low_global(mz->zone);
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001717}
Minchan Kim74e3f3c2010-10-26 14:21:31 -07001718#else
Johannes Weinerf16015f2012-01-12 17:17:52 -08001719static inline int inactive_anon_is_low(struct mem_cgroup_zone *mz)
Minchan Kim74e3f3c2010-10-26 14:21:31 -07001720{
1721 return 0;
1722}
1723#endif
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001724
Rik van Riel56e49d22009-06-16 15:32:28 -07001725static int inactive_file_is_low_global(struct zone *zone)
1726{
1727 unsigned long active, inactive;
1728
1729 active = zone_page_state(zone, NR_ACTIVE_FILE);
1730 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1731
1732 return (active > inactive);
1733}
1734
1735/**
1736 * inactive_file_is_low - check if file pages need to be deactivated
Johannes Weinerf16015f2012-01-12 17:17:52 -08001737 * @mz: memory cgroup and zone to check
Rik van Riel56e49d22009-06-16 15:32:28 -07001738 *
1739 * When the system is doing streaming IO, memory pressure here
1740 * ensures that active file pages get deactivated, until more
1741 * than half of the file pages are on the inactive list.
1742 *
1743 * Once we get to that situation, protect the system's working
1744 * set from being evicted by disabling active file page aging.
1745 *
1746 * This uses a different ratio than the anonymous pages, because
1747 * the page cache uses a use-once replacement algorithm.
1748 */
Johannes Weinerf16015f2012-01-12 17:17:52 -08001749static int inactive_file_is_low(struct mem_cgroup_zone *mz)
Rik van Riel56e49d22009-06-16 15:32:28 -07001750{
Johannes Weinerf16015f2012-01-12 17:17:52 -08001751 if (!scanning_global_lru(mz))
1752 return mem_cgroup_inactive_file_is_low(mz->mem_cgroup,
1753 mz->zone);
Rik van Riel56e49d22009-06-16 15:32:28 -07001754
Johannes Weinerf16015f2012-01-12 17:17:52 -08001755 return inactive_file_is_low_global(mz->zone);
Rik van Riel56e49d22009-06-16 15:32:28 -07001756}
1757
Johannes Weinerf16015f2012-01-12 17:17:52 -08001758static int inactive_list_is_low(struct mem_cgroup_zone *mz, int file)
Rik van Rielb39415b2009-12-14 17:59:48 -08001759{
1760 if (file)
Johannes Weinerf16015f2012-01-12 17:17:52 -08001761 return inactive_file_is_low(mz);
Rik van Rielb39415b2009-12-14 17:59:48 -08001762 else
Johannes Weinerf16015f2012-01-12 17:17:52 -08001763 return inactive_anon_is_low(mz);
Rik van Rielb39415b2009-12-14 17:59:48 -08001764}
1765
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001766static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
Johannes Weinerf16015f2012-01-12 17:17:52 -08001767 struct mem_cgroup_zone *mz,
1768 struct scan_control *sc, int priority)
Christoph Lameterb69408e2008-10-18 20:26:14 -07001769{
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001770 int file = is_file_lru(lru);
1771
Rik van Rielb39415b2009-12-14 17:59:48 -08001772 if (is_active_lru(lru)) {
Johannes Weinerf16015f2012-01-12 17:17:52 -08001773 if (inactive_list_is_low(mz, file))
Konstantin Khlebnikov09f85952012-05-29 15:06:53 -07001774 shrink_active_list(nr_to_scan, mz, sc, priority, lru);
Rik van Riel556adec2008-10-18 20:26:34 -07001775 return 0;
1776 }
1777
Konstantin Khlebnikov09f85952012-05-29 15:06:53 -07001778 return shrink_inactive_list(nr_to_scan, mz, sc, priority, lru);
Christoph Lameterb69408e2008-10-18 20:26:14 -07001779}
1780
Johannes Weinerf16015f2012-01-12 17:17:52 -08001781static int vmscan_swappiness(struct mem_cgroup_zone *mz,
1782 struct scan_control *sc)
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07001783{
Johannes Weiner89b5fae2012-01-12 17:17:50 -08001784 if (global_reclaim(sc))
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07001785 return vm_swappiness;
Johannes Weinerf16015f2012-01-12 17:17:52 -08001786 return mem_cgroup_swappiness(mz->mem_cgroup);
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07001787}
1788
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789/*
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001790 * Determine how aggressively the anon and file LRU lists should be
1791 * scanned. The relative value of each set of LRU lists is determined
 1792 * by looking at the fraction of the scanned pages that we rotated back
 1793 * onto the active list instead of evicting.
1794 *
Shaohua Li76a33fc2010-05-24 14:32:36 -07001795 * nr[0] = anon pages to scan; nr[1] = file pages to scan
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001796 */
Johannes Weinerf16015f2012-01-12 17:17:52 -08001797static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
1798 unsigned long *nr, int priority)
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001799{
1800 unsigned long anon, file, free;
1801 unsigned long anon_prio, file_prio;
1802 unsigned long ap, fp;
Johannes Weinerf16015f2012-01-12 17:17:52 -08001803 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
Shaohua Li76a33fc2010-05-24 14:32:36 -07001804 u64 fraction[2], denominator;
Hugh Dickins41113042012-01-12 17:20:01 -08001805 enum lru_list lru;
Shaohua Li76a33fc2010-05-24 14:32:36 -07001806 int noswap = 0;
Johannes Weinera4d3e9e2011-09-14 16:21:52 -07001807 bool force_scan = false;
KAMEZAWA Hiroyuki246e87a2011-05-26 16:25:34 -07001808
Johannes Weinerf11c0ca2011-10-31 17:07:27 -07001809 /*
1810 * If the zone or memcg is small, nr[l] can be 0. This
1811 * results in no scanning on this priority and a potential
1812 * priority drop. Global direct reclaim can go to the next
1813 * zone and tends to have no problems. Global kswapd is for
1814 * zone balancing and it needs to scan a minimum amount. When
1815 * reclaiming for a memcg, a priority drop can cause high
1816 * latencies, so it's better to scan a minimum amount there as
1817 * well.
1818 */
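	/*
	 * Example of the problem (illustrative): a memcg LRU holding 2000
	 * pages scanned at priority 12 yields 2000 >> 12 = 0 in the loop
	 * below, so force_scan bumps the target to SWAP_CLUSTER_MAX instead.
	 */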
Lisa Du36abcfd2013-09-11 14:22:36 -07001819 if (current_is_kswapd() && !zone_reclaimable(mz->zone))
Johannes Weinera4d3e9e2011-09-14 16:21:52 -07001820 force_scan = true;
Johannes Weiner89b5fae2012-01-12 17:17:50 -08001821 if (!global_reclaim(sc))
Johannes Weinera4d3e9e2011-09-14 16:21:52 -07001822 force_scan = true;
Shaohua Li76a33fc2010-05-24 14:32:36 -07001823
1824 /* If we have no swap space, do not bother scanning anon pages. */
Shaohua Lid1c2fbe2013-02-22 16:34:38 -08001825 if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
Shaohua Li76a33fc2010-05-24 14:32:36 -07001826 noswap = 1;
1827 fraction[0] = 0;
1828 fraction[1] = 1;
1829 denominator = 1;
1830 goto out;
1831 }
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001832
Johannes Weinerf16015f2012-01-12 17:17:52 -08001833 anon = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) +
1834 zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
1835 file = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) +
1836 zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
Johannes Weinera4d3e9e2011-09-14 16:21:52 -07001837
Johannes Weiner89b5fae2012-01-12 17:17:50 -08001838 if (global_reclaim(sc)) {
Johannes Weinerf16015f2012-01-12 17:17:52 -08001839 free = zone_page_state(mz->zone, NR_FREE_PAGES);
KOSAKI Motohiroeeee9a82009-01-07 18:08:17 -08001840 /* If we have very few page cache pages,
1841 force-scan anon pages. */
Johannes Weinerf16015f2012-01-12 17:17:52 -08001842 if (unlikely(file + free <= high_wmark_pages(mz->zone))) {
Shaohua Li76a33fc2010-05-24 14:32:36 -07001843 fraction[0] = 1;
1844 fraction[1] = 0;
1845 denominator = 1;
1846 goto out;
KOSAKI Motohiroeeee9a82009-01-07 18:08:17 -08001847 }
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001848 }
1849
1850 /*
KOSAKI Motohiro58c37f62010-08-09 17:19:51 -07001851 * With swappiness at 100, anonymous and file have the same priority.
1852 * This scanning priority is essentially the inverse of IO cost.
1853 */
Johannes Weinerf16015f2012-01-12 17:17:52 -08001854 anon_prio = vmscan_swappiness(mz, sc);
1855 file_prio = 200 - vmscan_swappiness(mz, sc);
KOSAKI Motohiro58c37f62010-08-09 17:19:51 -07001856
1857 /*
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001858 * OK, so we have swap space and a fair amount of page cache
1859 * pages. We use the recently rotated / recently scanned
1860 * ratios to determine how valuable each cache is.
1861 *
1862 * Because workloads change over time (and to avoid overflow)
1863 * we keep these statistics as a floating average, which ends
1864 * up weighing recent references more than old ones.
1865 *
1866 * anon in [0], file in [1]
1867 */
Johannes Weinerf16015f2012-01-12 17:17:52 -08001868 spin_lock_irq(&mz->zone->lru_lock);
KOSAKI Motohiro6e901572009-01-07 18:08:15 -08001869 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
KOSAKI Motohiro6e901572009-01-07 18:08:15 -08001870 reclaim_stat->recent_scanned[0] /= 2;
1871 reclaim_stat->recent_rotated[0] /= 2;
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001872 }
1873
KOSAKI Motohiro6e901572009-01-07 18:08:15 -08001874 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
KOSAKI Motohiro6e901572009-01-07 18:08:15 -08001875 reclaim_stat->recent_scanned[1] /= 2;
1876 reclaim_stat->recent_rotated[1] /= 2;
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001877 }
1878
1879 /*
Rik van Riel00d80892008-11-19 15:36:44 -08001880 * The amount of pressure on anon vs file pages is inversely
1881 * proportional to the fraction of recently scanned pages on
1882 * each list that were recently referenced and in active use.
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001883 */
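	/*
	 * Illustrative example (not from the original source): with the
	 * default vm_swappiness of 60, anon_prio is 60 and file_prio is 140.
	 * If ~90% of recently scanned anon pages were rotated back (still in
	 * use) but only ~10% of file pages were, then for 1000 scanned pages
	 * on each list ap ~= 60 * 1001 / 901 ~= 66 and fp ~= 140 * 1001 / 101
	 * ~= 1387, so roughly 95% of the scan effort goes to the file LRUs.
	 */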
Satoru Moriya49194d42012-05-29 15:06:47 -07001884 ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
KOSAKI Motohiro6e901572009-01-07 18:08:15 -08001885 ap /= reclaim_stat->recent_rotated[0] + 1;
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001886
Satoru Moriya49194d42012-05-29 15:06:47 -07001887 fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
KOSAKI Motohiro6e901572009-01-07 18:08:15 -08001888 fp /= reclaim_stat->recent_rotated[1] + 1;
Johannes Weinerf16015f2012-01-12 17:17:52 -08001889 spin_unlock_irq(&mz->zone->lru_lock);
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001890
Shaohua Li76a33fc2010-05-24 14:32:36 -07001891 fraction[0] = ap;
1892 fraction[1] = fp;
1893 denominator = ap + fp + 1;
1894out:
Hugh Dickins41113042012-01-12 17:20:01 -08001895 for_each_evictable_lru(lru) {
1896 int file = is_file_lru(lru);
Shaohua Li76a33fc2010-05-24 14:32:36 -07001897 unsigned long scan;
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001898
Hugh Dickins41113042012-01-12 17:20:01 -08001899 scan = zone_nr_lru_pages(mz, lru);
Satoru Moriya49194d42012-05-29 15:06:47 -07001900 if (priority || noswap || !vmscan_swappiness(mz, sc)) {
Shaohua Li76a33fc2010-05-24 14:32:36 -07001901 scan >>= priority;
Johannes Weinerf11c0ca2011-10-31 17:07:27 -07001902 if (!scan && force_scan)
1903 scan = SWAP_CLUSTER_MAX;
Shaohua Li76a33fc2010-05-24 14:32:36 -07001904 scan = div64_u64(scan * fraction[file], denominator);
1905 }
Hugh Dickins41113042012-01-12 17:20:01 -08001906 nr[lru] = scan;
Shaohua Li76a33fc2010-05-24 14:32:36 -07001907 }
Wu Fengguang6e08a362009-06-16 15:32:29 -07001908}
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001909
1910/*
Mel Gorman3e7d3442011-01-13 15:45:56 -08001911 * Reclaim/compaction depends on a number of pages being freed. To avoid
1912 * disruption to the system, a small number of order-0 pages continue to be
1913 * rotated and reclaimed in the normal fashion. However, by the time we get
1914 * back to the allocator and call try_to_compact_zone(), we ensure that
1915 * there are enough free pages for it to be likely successful
1916 */
Johannes Weinerf16015f2012-01-12 17:17:52 -08001917static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
Mel Gorman3e7d3442011-01-13 15:45:56 -08001918 unsigned long nr_reclaimed,
1919 unsigned long nr_scanned,
1920 struct scan_control *sc)
1921{
1922 unsigned long pages_for_compaction;
1923 unsigned long inactive_lru_pages;
1924
1925 /* If not in reclaim/compaction mode, stop */
Mel Gormanf3a310b2011-01-13 15:46:00 -08001926 if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
Mel Gorman3e7d3442011-01-13 15:45:56 -08001927 return false;
1928
Mel Gorman28765922011-02-25 14:44:20 -08001929 /* Consider stopping depending on scan and reclaim activity */
1930 if (sc->gfp_mask & __GFP_REPEAT) {
1931 /*
1932 * For __GFP_REPEAT allocations, stop reclaiming if the
1933 * full LRU list has been scanned and we are still failing
1934 * to reclaim pages. This full LRU scan is potentially
1935 * expensive but a __GFP_REPEAT caller really wants to succeed
1936 */
1937 if (!nr_reclaimed && !nr_scanned)
1938 return false;
1939 } else {
1940 /*
1941 * For non-__GFP_REPEAT allocations which can presumably
1942 * fail without consequence, stop if we failed to reclaim
1943 * any pages from the last SWAP_CLUSTER_MAX number of
1944 * pages that were scanned. This will return to the
 1945 * caller faster at the risk that reclaim/compaction and
 1946 * the resulting allocation attempt will fail.
1947 */
1948 if (!nr_reclaimed)
1949 return false;
1950 }
Mel Gorman3e7d3442011-01-13 15:45:56 -08001951
1952 /*
1953 * If we have not reclaimed enough pages for compaction and the
1954 * inactive lists are large enough, continue reclaiming
1955 */
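	/*
	 * For example (illustrative), an order-9 (THP-sized) request asks
	 * for 2UL << 9 = 1024 reclaimed pages before handing over to
	 * compaction, provided the inactive lists still hold more than that.
	 */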
1956 pages_for_compaction = (2UL << sc->order);
Johannes Weinerf16015f2012-01-12 17:17:52 -08001957 inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
Shaohua Lid1c2fbe2013-02-22 16:34:38 -08001958 if (get_nr_swap_pages() > 0)
Johannes Weinerf16015f2012-01-12 17:17:52 -08001959 inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
Mel Gorman3e7d3442011-01-13 15:45:56 -08001960 if (sc->nr_reclaimed < pages_for_compaction &&
1961 inactive_lru_pages > pages_for_compaction)
1962 return true;
1963
1964 /* If compaction would go ahead or the allocation would succeed, stop */
Johannes Weinerf16015f2012-01-12 17:17:52 -08001965 switch (compaction_suitable(mz->zone, sc->order)) {
Mel Gorman3e7d3442011-01-13 15:45:56 -08001966 case COMPACT_PARTIAL:
1967 case COMPACT_CONTINUE:
1968 return false;
1969 default:
1970 return true;
1971 }
1972}
1973
1974/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
1976 */
Johannes Weinerf16015f2012-01-12 17:17:52 -08001977static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
1978 struct scan_control *sc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979{
Christoph Lameterb69408e2008-10-18 20:26:14 -07001980 unsigned long nr[NR_LRU_LISTS];
Christoph Lameter86959492006-03-22 00:08:18 -08001981 unsigned long nr_to_scan;
Hugh Dickins41113042012-01-12 17:20:01 -08001982 enum lru_list lru;
Johannes Weinerf0fdc5e2011-02-10 15:01:34 -08001983 unsigned long nr_reclaimed, nr_scanned;
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08001984 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
Shaohua Li3da367c2011-10-31 17:07:03 -07001985 struct blk_plug plug;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
Mel Gorman3e7d3442011-01-13 15:45:56 -08001987restart:
1988 nr_reclaimed = 0;
Johannes Weinerf0fdc5e2011-02-10 15:01:34 -08001989 nr_scanned = sc->nr_scanned;
Johannes Weinerf16015f2012-01-12 17:17:52 -08001990 get_scan_count(mz, sc, nr, priority);
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08001991
Shaohua Li3da367c2011-10-31 17:07:03 -07001992 blk_start_plug(&plug);
Rik van Riel556adec2008-10-18 20:26:34 -07001993 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1994 nr[LRU_INACTIVE_FILE]) {
Hugh Dickins41113042012-01-12 17:20:01 -08001995 for_each_evictable_lru(lru) {
1996 if (nr[lru]) {
KOSAKI Motohiroece74b22009-12-14 17:59:14 -08001997 nr_to_scan = min_t(unsigned long,
Hugh Dickins41113042012-01-12 17:20:01 -08001998 nr[lru], SWAP_CLUSTER_MAX);
1999 nr[lru] -= nr_to_scan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000
Hugh Dickins41113042012-01-12 17:20:01 -08002001 nr_reclaimed += shrink_list(lru, nr_to_scan,
Johannes Weinerf16015f2012-01-12 17:17:52 -08002002 mz, sc, priority);
Christoph Lameterb69408e2008-10-18 20:26:14 -07002003 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 }
Rik van Riela79311c2009-01-06 14:40:01 -08002005 /*
2006 * On large memory systems, scan >> priority can become
2007 * really large. This is fine for the starting priority;
2008 * we want to put equal scanning pressure on each zone.
 2009 * However, if the VM has a harder time freeing pages,
2010 * with multiple processes reclaiming pages, the total
2011 * freeing target can get unreasonably large.
2012 */
Ying Han41c93082012-04-12 12:49:16 -07002013 if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
Rik van Riela79311c2009-01-06 14:40:01 -08002014 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015 }
Shaohua Li3da367c2011-10-31 17:07:03 -07002016 blk_finish_plug(&plug);
Mel Gorman3e7d3442011-01-13 15:45:56 -08002017 sc->nr_reclaimed += nr_reclaimed;
KOSAKI Motohiro01dbe5c2009-01-06 14:40:02 -08002018
Rik van Riel556adec2008-10-18 20:26:34 -07002019 /*
2020 * Even if we did not try to evict anon pages at all, we want to
2021 * rebalance the anon lru active/inactive ratio.
2022 */
Johannes Weinerf16015f2012-01-12 17:17:52 -08002023 if (inactive_anon_is_low(mz))
Konstantin Khlebnikov09f85952012-05-29 15:06:53 -07002024 shrink_active_list(SWAP_CLUSTER_MAX, mz,
2025 sc, priority, LRU_ACTIVE_ANON);
Rik van Riel556adec2008-10-18 20:26:34 -07002026
Mel Gorman3e7d3442011-01-13 15:45:56 -08002027 /* reclaim/compaction might need reclaim to continue */
Johannes Weinerf16015f2012-01-12 17:17:52 -08002028 if (should_continue_reclaim(mz, nr_reclaimed,
Mel Gorman3e7d3442011-01-13 15:45:56 -08002029 sc->nr_scanned - nr_scanned, sc))
2030 goto restart;
2031
Andrew Morton232ea4d2007-02-28 20:13:21 -08002032 throttle_vm_writeout(sc->gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033}
2034
Johannes Weinerf16015f2012-01-12 17:17:52 -08002035static void shrink_zone(int priority, struct zone *zone,
2036 struct scan_control *sc)
2037{
Johannes Weiner56600482012-01-12 17:17:59 -08002038 struct mem_cgroup *root = sc->target_mem_cgroup;
2039 struct mem_cgroup_reclaim_cookie reclaim = {
Johannes Weinerf16015f2012-01-12 17:17:52 -08002040 .zone = zone,
Johannes Weiner56600482012-01-12 17:17:59 -08002041 .priority = priority,
Johannes Weinerf16015f2012-01-12 17:17:52 -08002042 };
Johannes Weiner56600482012-01-12 17:17:59 -08002043 struct mem_cgroup *memcg;
Johannes Weinerf16015f2012-01-12 17:17:52 -08002044
Johannes Weiner56600482012-01-12 17:17:59 -08002045 memcg = mem_cgroup_iter(root, NULL, &reclaim);
2046 do {
2047 struct mem_cgroup_zone mz = {
2048 .mem_cgroup = memcg,
2049 .zone = zone,
2050 };
2051
2052 shrink_mem_cgroup_zone(priority, &mz, sc);
2053 /*
2054 * Limit reclaim has historically picked one memcg and
2055 * scanned it with decreasing priority levels until
2056 * nr_to_reclaim had been reclaimed. This priority
2057 * cycle is thus over after a single memcg.
Johannes Weinerb95a2f22012-01-12 17:18:06 -08002058 *
2059 * Direct reclaim and kswapd, on the other hand, have
2060 * to scan all memory cgroups to fulfill the overall
2061 * scan target for the zone.
Johannes Weiner56600482012-01-12 17:17:59 -08002062 */
2063 if (!global_reclaim(sc)) {
2064 mem_cgroup_iter_break(root, memcg);
2065 break;
2066 }
2067 memcg = mem_cgroup_iter(root, memcg, &reclaim);
2068 } while (memcg);
Johannes Weinerf16015f2012-01-12 17:17:52 -08002069}
2070
Mel Gormanfe4b1b22012-01-12 17:19:45 -08002071/* Returns true if compaction should go ahead for a high-order request */
2072static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2073{
2074 unsigned long balance_gap, watermark;
2075 bool watermark_ok;
2076
2077 /* Do not consider compaction for orders reclaim is meant to satisfy */
2078 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
2079 return false;
2080
2081 /*
2082 * Compaction takes time to run and there are potentially other
2083 * callers using the pages just freed. Continue reclaiming until
2084 * there is a buffer of free pages available to give compaction
2085 * a reasonable chance of completing and allocating the page
2086 */
2087 balance_gap = min(low_wmark_pages(zone),
2088 (zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
2089 KSWAPD_ZONE_BALANCE_GAP_RATIO);
2090 watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
2091 watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
2092
2093 /*
2094 * If compaction is deferred, reclaim up to a point where
2095 * compaction will have a chance of success when re-enabled
2096 */
Rik van Rielaff62242012-03-21 16:33:52 -07002097 if (compaction_deferred(zone, sc->order))
Mel Gormanfe4b1b22012-01-12 17:19:45 -08002098 return watermark_ok;
2099
2100 /* If compaction is not ready to start, keep reclaiming */
2101 if (!compaction_suitable(zone, sc->order))
2102 return false;
2103
2104 return watermark_ok;
2105}
2106
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107/*
2108 * This is the direct reclaim path, for page-allocating processes. We only
2109 * try to reclaim pages from zones which will satisfy the caller's allocation
2110 * request.
2111 *
Mel Gorman41858962009-06-16 15:32:12 -07002112 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
2113 * Because:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
2115 * allocation or
Mel Gorman41858962009-06-16 15:32:12 -07002116 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
2117 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
2118 * zone defense algorithm.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 * If a zone is deemed to be full of pinned pages then just give it a light
 2121 * scan and then give up on it.
Mel Gormane0c23272011-10-31 17:09:33 -07002122 *
2123 * This function returns true if a zone is being reclaimed for a costly
Mel Gormanfe4b1b22012-01-12 17:19:45 -08002124 * high-order allocation and compaction is ready to begin. This indicates to
Mel Gorman0cee34f2012-01-12 17:19:49 -08002125 * the caller that it should consider retrying the allocation instead of
2126 * further reclaim.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 */
Mel Gormane0c23272011-10-31 17:09:33 -07002128static bool shrink_zones(int priority, struct zonelist *zonelist,
Andrew Morton05ff5132006-03-22 00:08:20 -08002129 struct scan_control *sc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130{
Mel Gormandd1a2392008-04-28 02:12:17 -07002131 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07002132 struct zone *zone;
Ying Hand149e3b2011-05-26 16:25:27 -07002133 unsigned long nr_soft_reclaimed;
2134 unsigned long nr_soft_scanned;
Mel Gorman0cee34f2012-01-12 17:19:49 -08002135 bool aborted_reclaim = false;
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08002136
Mel Gormancc715d92012-03-21 16:34:00 -07002137 /*
2138 * If the number of buffer_heads in the machine exceeds the maximum
2139 * allowed level, force direct reclaim to scan the highmem zone as
2140 * highmem pages could be pinning lowmem pages storing buffer_heads
2141 */
2142 if (buffer_heads_over_limit)
2143 sc->gfp_mask |= __GFP_HIGHMEM;
2144
Mel Gormand4debc62010-08-09 17:19:29 -07002145 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2146 gfp_zone(sc->gfp_mask), sc->nodemask) {
Con Kolivasf3fe6512006-01-06 00:11:15 -08002147 if (!populated_zone(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 continue;
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08002149 /*
 2150 * Take care that memory controller reclaim has only a small
 2151 * influence on the global LRU.
2152 */
Johannes Weiner89b5fae2012-01-12 17:17:50 -08002153 if (global_reclaim(sc)) {
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08002154 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2155 continue;
Lisa Du36abcfd2013-09-11 14:22:36 -07002156 if (priority != DEF_PRIORITY &&
2157 !zone_reclaimable(zone))
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08002158 continue; /* Let kswapd poll it */
Rik van Riele0887c12011-10-31 17:09:31 -07002159 if (COMPACTION_BUILD) {
2160 /*
Mel Gormane0c23272011-10-31 17:09:33 -07002161 * If we already have plenty of memory free for
2162 * compaction in this zone, don't free any more.
2163 * Even though compaction is invoked for any
2164 * non-zero order, only frequent costly order
2165 * reclamation is disruptive enough to become a
Copot Alexandruc7cfa372012-03-21 16:34:10 -07002166 * noticeable problem, like transparent huge
2167 * page allocations.
Rik van Riele0887c12011-10-31 17:09:31 -07002168 */
Mel Gormanfe4b1b22012-01-12 17:19:45 -08002169 if (compaction_ready(zone, sc)) {
Mel Gorman0cee34f2012-01-12 17:19:49 -08002170 aborted_reclaim = true;
Rik van Riele0887c12011-10-31 17:09:31 -07002171 continue;
Mel Gormane0c23272011-10-31 17:09:33 -07002172 }
Rik van Riele0887c12011-10-31 17:09:31 -07002173 }
KAMEZAWA Hiroyukiac34a1a2011-06-27 16:18:12 -07002174 /*
2175 * This steals pages from memory cgroups over softlimit
2176 * and returns the number of reclaimed pages and
2177 * scanned pages. This works for global memory pressure
2178 * and balancing, not for a memcg's limit.
2179 */
2180 nr_soft_scanned = 0;
2181 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2182 sc->order, sc->gfp_mask,
2183 &nr_soft_scanned);
2184 sc->nr_reclaimed += nr_soft_reclaimed;
2185 sc->nr_scanned += nr_soft_scanned;
 2186 /* need some check here to avoid an unnecessary shrink_zone() call */
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08002187 }
Nick Piggin408d8542006-09-25 23:31:27 -07002188
Rik van Riela79311c2009-01-06 14:40:01 -08002189 shrink_zone(priority, zone, sc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 }
Mel Gormane0c23272011-10-31 17:09:33 -07002191
Mel Gorman0cee34f2012-01-12 17:19:49 -08002192 return aborted_reclaim;
Minchan Kimd1908362010-09-22 13:05:01 -07002193}
2194
KOSAKI Motohiro929bea72011-04-14 15:22:12 -07002195/* All zones in zonelist are unreclaimable? */
Minchan Kimd1908362010-09-22 13:05:01 -07002196static bool all_unreclaimable(struct zonelist *zonelist,
2197 struct scan_control *sc)
2198{
2199 struct zoneref *z;
2200 struct zone *zone;
Minchan Kimd1908362010-09-22 13:05:01 -07002201
2202 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2203 gfp_zone(sc->gfp_mask), sc->nodemask) {
2204 if (!populated_zone(zone))
2205 continue;
2206 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2207 continue;
Lisa Du36abcfd2013-09-11 14:22:36 -07002208 if (zone_reclaimable(zone))
KOSAKI Motohiro929bea72011-04-14 15:22:12 -07002209 return false;
Minchan Kimd1908362010-09-22 13:05:01 -07002210 }
2211
KOSAKI Motohiro929bea72011-04-14 15:22:12 -07002212 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213}
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002214
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215/*
2216 * This is the main entry point to direct page reclaim.
2217 *
2218 * If a full scan of the inactive list fails to free enough memory then we
2219 * are "out of memory" and something needs to be killed.
2220 *
2221 * If the caller is !__GFP_FS then the probability of a failure is reasonably
2222 * high - the zone may be full of dirty or under-writeback pages, which this
Jens Axboe5b0830c2009-09-23 19:37:09 +02002223 * caller can't do much about. We kick the writeback threads and take explicit
2224 * naps in the hope that some of these pages can be written. But if the
 2225 * allocating task holds filesystem locks which prevent writeout, this might not
2226 * work, and the allocation attempt will fail.
Nishanth Aravamudana41f24e2008-04-29 00:58:25 -07002227 *
2228 * returns: 0, if no pages reclaimed
2229 * else, the number of pages reclaimed
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 */
Mel Gormandac1d272008-04-28 02:12:12 -07002231static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
Ying Hana09ed5e2011-05-24 17:12:26 -07002232 struct scan_control *sc,
2233 struct shrink_control *shrink)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234{
2235 int priority;
Andrew Morton69e05942006-03-22 00:08:19 -08002236 unsigned long total_scanned = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 struct reclaim_state *reclaim_state = current->reclaim_state;
Mel Gormandd1a2392008-04-28 02:12:17 -07002238 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07002239 struct zone *zone;
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08002240 unsigned long writeback_threshold;
Mel Gorman0cee34f2012-01-12 17:19:49 -08002241 bool aborted_reclaim;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242
Keika Kobayashi873b4772008-07-25 01:48:52 -07002243 delayacct_freepages_start();
2244
Johannes Weiner89b5fae2012-01-12 17:17:50 -08002245 if (global_reclaim(sc))
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08002246 count_vm_event(ALLOCSTALL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247
2248 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
Balbir Singh66e17072008-02-07 00:13:56 -08002249 sc->nr_scanned = 0;
Rik van Rielf7b7fd82005-11-28 13:44:07 -08002250 if (!priority)
Johannes Weinerf16015f2012-01-12 17:17:52 -08002251 disable_swap_token(sc->target_mem_cgroup);
Mel Gorman0cee34f2012-01-12 17:19:49 -08002252 aborted_reclaim = shrink_zones(priority, zonelist, sc);
Mel Gormane0c23272011-10-31 17:09:33 -07002253
Balbir Singh66e17072008-02-07 00:13:56 -08002254 /*
2255 * Don't shrink slabs when reclaiming memory from
2256 * over limit cgroups
2257 */
Johannes Weiner89b5fae2012-01-12 17:17:50 -08002258 if (global_reclaim(sc)) {
KOSAKI Motohiroc6a8a8c2010-08-09 17:19:14 -07002259 unsigned long lru_pages = 0;
Mel Gormand4debc62010-08-09 17:19:29 -07002260 for_each_zone_zonelist(zone, z, zonelist,
2261 gfp_zone(sc->gfp_mask)) {
KOSAKI Motohiroc6a8a8c2010-08-09 17:19:14 -07002262 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2263 continue;
2264
2265 lru_pages += zone_reclaimable_pages(zone);
2266 }
2267
Ying Han1495f232011-05-24 17:12:27 -07002268 shrink_slab(shrink, sc->nr_scanned, lru_pages);
KAMEZAWA Hiroyuki91a45472008-02-07 00:14:29 -08002269 if (reclaim_state) {
Rik van Riela79311c2009-01-06 14:40:01 -08002270 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
KAMEZAWA Hiroyuki91a45472008-02-07 00:14:29 -08002271 reclaim_state->reclaimed_slab = 0;
2272 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 }
Balbir Singh66e17072008-02-07 00:13:56 -08002274 total_scanned += sc->nr_scanned;
KOSAKI Motohirobb21c7c2010-06-04 14:15:05 -07002275 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277
2278 /*
2279 * Try to write back as many pages as we just scanned. This
2280 * tends to cause slow streaming writers to write data to the
2281 * disk smoothly, at the dirtying rate, which is nice. But
2282 * that's undesirable in laptop mode, where we *want* lumpy
2283 * writeout. So in laptop mode, write out the whole world.
2284 */
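		/*
		 * Illustrative numbers: for a typical direct reclaim with
		 * nr_to_reclaim of SWAP_CLUSTER_MAX (32), the threshold below
		 * is 48, so the flusher threads are woken once about 48 pages
		 * have been scanned without reaching the reclaim target.
		 */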
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08002285 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
2286 if (total_scanned > writeback_threshold) {
Curt Wohlgemuth0e175a12011-10-07 21:54:10 -06002287 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
2288 WB_REASON_TRY_TO_FREE_PAGES);
Balbir Singh66e17072008-02-07 00:13:56 -08002289 sc->may_writepage = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 }
2291
2292 /* Take a nap, wait for some writeback to complete */
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08002293 if (!sc->hibernation_mode && sc->nr_scanned &&
Mel Gorman0e093d992010-10-26 14:21:45 -07002294 priority < DEF_PRIORITY - 2) {
2295 struct zone *preferred_zone;
2296
2297 first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
David Rientjesf33261d2011-01-25 15:07:20 -08002298 &cpuset_current_mems_allowed,
2299 &preferred_zone);
Mel Gorman0e093d992010-10-26 14:21:45 -07002300 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
2301 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 }
KOSAKI Motohirobb21c7c2010-06-04 14:15:05 -07002303
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304out:
Keika Kobayashi873b4772008-07-25 01:48:52 -07002305 delayacct_freepages_end();
2306
KOSAKI Motohirobb21c7c2010-06-04 14:15:05 -07002307 if (sc->nr_reclaimed)
2308 return sc->nr_reclaimed;
2309
KOSAKI Motohiro929bea72011-04-14 15:22:12 -07002310 /*
2311	 * As hibernation is going on, kswapd is frozen so that it can't mark
2312	 * the zone all_unreclaimable. Thus we bypass the all_unreclaimable
2313 * check.
2314 */
2315 if (oom_killer_disabled)
2316 return 0;
2317
Mel Gorman0cee34f2012-01-12 17:19:49 -08002318 /* Aborted reclaim to try compaction? don't OOM, then */
2319 if (aborted_reclaim)
Mel Gorman73350842012-01-12 17:19:33 -08002320 return 1;
2321
KOSAKI Motohirobb21c7c2010-06-04 14:15:05 -07002322 /* top priority shrink_zones still had more to do? don't OOM, then */
Johannes Weiner89b5fae2012-01-12 17:17:50 -08002323 if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
KOSAKI Motohirobb21c7c2010-06-04 14:15:05 -07002324 return 1;
2325
2326 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327}
2328
Mel Gormandac1d272008-04-28 02:12:12 -07002329unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
KAMEZAWA Hiroyuki327c0e92009-03-31 15:23:31 -07002330 gfp_t gfp_mask, nodemask_t *nodemask)
Balbir Singh66e17072008-02-07 00:13:56 -08002331{
Mel Gorman33906bc2010-08-09 17:19:16 -07002332 unsigned long nr_reclaimed;
Balbir Singh66e17072008-02-07 00:13:56 -08002333 struct scan_control sc = {
2334 .gfp_mask = gfp_mask,
2335 .may_writepage = !laptop_mode,
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08002336 .nr_to_reclaim = SWAP_CLUSTER_MAX,
Johannes Weinera6dc60f2009-03-31 15:19:30 -07002337 .may_unmap = 1,
KOSAKI Motohiro2e2e4252009-04-21 12:24:57 -07002338 .may_swap = 1,
Balbir Singh66e17072008-02-07 00:13:56 -08002339 .order = order,
Johannes Weinerf16015f2012-01-12 17:17:52 -08002340 .target_mem_cgroup = NULL,
KAMEZAWA Hiroyuki327c0e92009-03-31 15:23:31 -07002341 .nodemask = nodemask,
Balbir Singh66e17072008-02-07 00:13:56 -08002342 };
Ying Hana09ed5e2011-05-24 17:12:26 -07002343 struct shrink_control shrink = {
2344 .gfp_mask = sc.gfp_mask,
2345 };
Balbir Singh66e17072008-02-07 00:13:56 -08002346
Mel Gorman33906bc2010-08-09 17:19:16 -07002347 trace_mm_vmscan_direct_reclaim_begin(order,
2348 sc.may_writepage,
2349 gfp_mask);
2350
Ying Hana09ed5e2011-05-24 17:12:26 -07002351 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
Mel Gorman33906bc2010-08-09 17:19:16 -07002352
2353 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2354
2355 return nr_reclaimed;
Balbir Singh66e17072008-02-07 00:13:56 -08002356}
2357
Balbir Singh00f0b822008-03-04 14:28:39 -08002358#ifdef CONFIG_CGROUP_MEM_RES_CTLR
Balbir Singh66e17072008-02-07 00:13:56 -08002359
Johannes Weiner72835c82012-01-12 17:18:32 -08002360unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
Balbir Singh4e416952009-09-23 15:56:39 -07002361 gfp_t gfp_mask, bool noswap,
Ying Han0ae5e892011-05-26 16:25:25 -07002362 struct zone *zone,
2363 unsigned long *nr_scanned)
Balbir Singh4e416952009-09-23 15:56:39 -07002364{
2365 struct scan_control sc = {
Ying Han0ae5e892011-05-26 16:25:25 -07002366 .nr_scanned = 0,
KOSAKI Motohirob8f5c562010-08-10 18:03:02 -07002367 .nr_to_reclaim = SWAP_CLUSTER_MAX,
Balbir Singh4e416952009-09-23 15:56:39 -07002368 .may_writepage = !laptop_mode,
2369 .may_unmap = 1,
2370 .may_swap = !noswap,
Balbir Singh4e416952009-09-23 15:56:39 -07002371 .order = 0,
Johannes Weiner72835c82012-01-12 17:18:32 -08002372 .target_mem_cgroup = memcg,
Balbir Singh4e416952009-09-23 15:56:39 -07002373 };
Johannes Weiner56600482012-01-12 17:17:59 -08002374 struct mem_cgroup_zone mz = {
Johannes Weiner72835c82012-01-12 17:18:32 -08002375 .mem_cgroup = memcg,
Johannes Weiner56600482012-01-12 17:17:59 -08002376 .zone = zone,
2377 };
Ying Han0ae5e892011-05-26 16:25:25 -07002378
Balbir Singh4e416952009-09-23 15:56:39 -07002379 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2380 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07002381
2382 trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
2383 sc.may_writepage,
2384 sc.gfp_mask);
2385
Balbir Singh4e416952009-09-23 15:56:39 -07002386 /*
2387 * NOTE: Although we can get the priority field, using it
2388 * here is not a good idea, since it limits the pages we can scan.
2389	 * If we don't reclaim here, the shrink_zone from balance_pgdat
2390	 * will pick up pages from other mem cgroups as well. We hack
2391 * the priority and make it zero.
2392 */
Johannes Weiner56600482012-01-12 17:17:59 -08002393 shrink_mem_cgroup_zone(0, &mz, &sc);
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07002394
2395 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2396
Ying Han0ae5e892011-05-26 16:25:25 -07002397 *nr_scanned = sc.nr_scanned;
Balbir Singh4e416952009-09-23 15:56:39 -07002398 return sc.nr_reclaimed;
2399}
2400
Johannes Weiner72835c82012-01-12 17:18:32 -08002401unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08002402 gfp_t gfp_mask,
Johannes Weiner185efc02011-09-14 16:21:58 -07002403 bool noswap)
Balbir Singh66e17072008-02-07 00:13:56 -08002404{
Balbir Singh4e416952009-09-23 15:56:39 -07002405 struct zonelist *zonelist;
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07002406 unsigned long nr_reclaimed;
Ying Han889976d2011-05-26 16:25:33 -07002407 int nid;
Balbir Singh66e17072008-02-07 00:13:56 -08002408 struct scan_control sc = {
Balbir Singh66e17072008-02-07 00:13:56 -08002409 .may_writepage = !laptop_mode,
Johannes Weinera6dc60f2009-03-31 15:19:30 -07002410 .may_unmap = 1,
KOSAKI Motohiro2e2e4252009-04-21 12:24:57 -07002411 .may_swap = !noswap,
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08002412 .nr_to_reclaim = SWAP_CLUSTER_MAX,
Balbir Singh66e17072008-02-07 00:13:56 -08002413 .order = 0,
Johannes Weiner72835c82012-01-12 17:18:32 -08002414 .target_mem_cgroup = memcg,
KAMEZAWA Hiroyuki327c0e92009-03-31 15:23:31 -07002415 .nodemask = NULL, /* we don't care the placement */
Ying Hana09ed5e2011-05-24 17:12:26 -07002416 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2417 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2418 };
2419 struct shrink_control shrink = {
2420 .gfp_mask = sc.gfp_mask,
Balbir Singh66e17072008-02-07 00:13:56 -08002421 };
Balbir Singh66e17072008-02-07 00:13:56 -08002422
Ying Han889976d2011-05-26 16:25:33 -07002423 /*
2424 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
2425	 * care which node pages are taken from. So the node where we start the
2426 * scan does not need to be the current node.
2427 */
Johannes Weiner72835c82012-01-12 17:18:32 -08002428 nid = mem_cgroup_select_victim_node(memcg);
Ying Han889976d2011-05-26 16:25:33 -07002429
2430 zonelist = NODE_DATA(nid)->node_zonelists;
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07002431
2432 trace_mm_vmscan_memcg_reclaim_begin(0,
2433 sc.may_writepage,
2434 sc.gfp_mask);
2435
Ying Hana09ed5e2011-05-24 17:12:26 -07002436 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07002437
2438 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2439
2440 return nr_reclaimed;
Balbir Singh66e17072008-02-07 00:13:56 -08002441}
2442#endif
2443
Johannes Weinerf16015f2012-01-12 17:17:52 -08002444static void age_active_anon(struct zone *zone, struct scan_control *sc,
2445 int priority)
2446{
Johannes Weinerb95a2f22012-01-12 17:18:06 -08002447 struct mem_cgroup *memcg;
Johannes Weinerf16015f2012-01-12 17:17:52 -08002448
Johannes Weinerb95a2f22012-01-12 17:18:06 -08002449 if (!total_swap_pages)
2450 return;
2451
2452 memcg = mem_cgroup_iter(NULL, NULL, NULL);
2453 do {
2454 struct mem_cgroup_zone mz = {
2455 .mem_cgroup = memcg,
2456 .zone = zone,
2457 };
2458
2459 if (inactive_anon_is_low(&mz))
2460 shrink_active_list(SWAP_CLUSTER_MAX, &mz,
Konstantin Khlebnikov09f85952012-05-29 15:06:53 -07002461 sc, priority, LRU_ACTIVE_ANON);
Johannes Weinerb95a2f22012-01-12 17:18:06 -08002462
2463 memcg = mem_cgroup_iter(NULL, memcg, NULL);
2464 } while (memcg);
Johannes Weinerf16015f2012-01-12 17:17:52 -08002465}
2466
Mel Gorman1741c872011-01-13 15:46:21 -08002467/*
2468 * pgdat_balanced is used when checking if a node is balanced for high-order
2469 * allocations. Only zones that meet watermarks and are in a zone allowed
2470	 * by the caller's classzone_idx are added to balanced_pages. The total of
2471	 * balanced pages must be at least 25% of the pages in the zones allowed by
2472	 * classzone_idx for the node to be considered balanced. Forcing all zones to be balanced
2473 * for high orders can cause excessive reclaim when there are imbalanced zones.
2474 * The choice of 25% is due to
2475 * o a 16M DMA zone that is balanced will not balance a zone on any
2476	 * o a 16M DMA zone that is balanced will not balance a node on any
2477 * o On all other machines, the top zone must be at least a reasonable
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002478 * percentage of the middle zones. For example, on 32-bit x86, highmem
Mel Gorman1741c872011-01-13 15:46:21 -08002479	 * would need to be at least 256M for it to balance a whole node.
2480 * Similarly, on x86-64 the Normal zone would need to be at least 1G
2481 * to balance a node on its own. These seemed like reasonable ratios.
2482 */
2483static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
2484 int classzone_idx)
2485{
2486 unsigned long present_pages = 0;
2487 int i;
2488
2489 for (i = 0; i <= classzone_idx; i++)
2490 present_pages += pgdat->node_zones[i].present_pages;
2491
Shaohua Li4746efd2011-07-19 08:49:26 -07002492 /* A special case here: if zone has no page, we think it's balanced */
2493 return balanced_pages >= (present_pages >> 2);
Mel Gorman1741c872011-01-13 15:46:21 -08002494}
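/*
 * Worked example: if the zones up to and including classzone_idx hold
 * 1,048,576 pages (4GB with 4K pages), pgdat_balanced() requires
 * balanced_pages >= 1048576 >> 2 = 262,144 pages (roughly 1GB) before the
 * node is treated as balanced for a high-order allocation.
 */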
2495
Mel Gormanf50de2d2009-12-14 17:58:53 -08002496/* is kswapd sleeping prematurely? */
Mel Gormandc83edd2011-01-13 15:46:26 -08002497static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
2498 int classzone_idx)
Mel Gormanf50de2d2009-12-14 17:58:53 -08002499{
KOSAKI Motohirobb3ab592009-12-14 17:58:55 -08002500 int i;
Mel Gorman1741c872011-01-13 15:46:21 -08002501 unsigned long balanced = 0;
2502 bool all_zones_ok = true;
Mel Gormanf50de2d2009-12-14 17:58:53 -08002503
2504 /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
2505 if (remaining)
Mel Gormandc83edd2011-01-13 15:46:26 -08002506 return true;
Mel Gormanf50de2d2009-12-14 17:58:53 -08002507
Mel Gorman0abdee22011-01-13 15:46:22 -08002508 /* Check the watermark levels */
Mel Gorman08951e52011-07-08 15:39:36 -07002509 for (i = 0; i <= classzone_idx; i++) {
KOSAKI Motohirobb3ab592009-12-14 17:58:55 -08002510 struct zone *zone = pgdat->node_zones + i;
2511
2512 if (!populated_zone(zone))
2513 continue;
2514
Mel Gorman355b09c2011-01-13 15:46:24 -08002515 /*
2516 * balance_pgdat() skips over all_unreclaimable after
2517 * DEF_PRIORITY. Effectively, it considers them balanced so
2518 * they must be considered balanced here as well if kswapd
2519 * is to sleep
2520 */
Lisa Du36abcfd2013-09-11 14:22:36 -07002521 if (!zone_reclaimable(zone)) {
Mel Gorman355b09c2011-01-13 15:46:24 -08002522 balanced += zone->present_pages;
KOSAKI Motohirode3fab32010-01-15 17:01:25 -08002523 continue;
Mel Gorman355b09c2011-01-13 15:46:24 -08002524 }
KOSAKI Motohirode3fab32010-01-15 17:01:25 -08002525
Mel Gorman88f5acf2011-01-13 15:45:41 -08002526 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
Mel Gormanda175d02011-07-08 15:39:39 -07002527 i, 0))
Mel Gorman1741c872011-01-13 15:46:21 -08002528 all_zones_ok = false;
2529 else
2530 balanced += zone->present_pages;
KOSAKI Motohirobb3ab592009-12-14 17:58:55 -08002531 }
Mel Gormanf50de2d2009-12-14 17:58:53 -08002532
Mel Gorman1741c872011-01-13 15:46:21 -08002533 /*
2534 * For high-order requests, the balanced zones must contain at least
2535	 * 25% of the node's pages for kswapd to sleep. For order-0, all zones
2536 * must be balanced
2537 */
2538 if (order)
Johannes Weinerafc7e322011-05-24 17:11:09 -07002539 return !pgdat_balanced(pgdat, balanced, classzone_idx);
Mel Gorman1741c872011-01-13 15:46:21 -08002540 else
2541 return !all_zones_ok;
Mel Gormanf50de2d2009-12-14 17:58:53 -08002542}
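/*
 * Illustrative case: after an order-3 wakeup on a node where only a small
 * DMA zone meets its high watermark, balanced covers far less than 25% of
 * the pages allowed by classzone_idx, so pgdat_balanced() fails, this
 * returns true and kswapd keeps reclaiming instead of going back to sleep.
 */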
2543
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544/*
2545 * For kswapd, balance_pgdat() will work across all this node's zones until
Mel Gorman41858962009-06-16 15:32:12 -07002546 * they are all at high_wmark_pages(zone).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 *
Mel Gorman0abdee22011-01-13 15:46:22 -08002548 * Returns the final order kswapd was reclaiming at
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 *
2550 * There is special handling here for zones which are full of pinned pages.
2551 * This can happen if the pages are all mlocked, or if they are all used by
2552 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
2553 * What we do is to detect the case where all pages in the zone have been
2554 * scanned twice and there has been zero successful reclaim. Mark the zone as
2555 * dead and from now on, only perform a short scan. Basically we're polling
2556 * the zone for when the problem goes away.
2557 *
2558 * kswapd scans the zones in the highmem->normal->dma direction. It skips
Mel Gorman41858962009-06-16 15:32:12 -07002559 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
2560 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
2561 * lower zones regardless of the number of free pages in the lower zones. This
2562 * interoperates with the page allocator fallback scheme to ensure that aging
2563 * of pages is balanced across the zones.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 */
Mel Gorman99504742011-01-13 15:46:20 -08002565static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
Mel Gormandc83edd2011-01-13 15:46:26 -08002566 int *classzone_idx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568 int all_zones_ok;
Mel Gorman1741c872011-01-13 15:46:21 -08002569 unsigned long balanced;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570 int priority;
2571 int i;
Mel Gorman99504742011-01-13 15:46:20 -08002572 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
Andrew Morton69e05942006-03-22 00:08:19 -08002573 unsigned long total_scanned;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574 struct reclaim_state *reclaim_state = current->reclaim_state;
Ying Han0ae5e892011-05-26 16:25:25 -07002575 unsigned long nr_soft_reclaimed;
2576 unsigned long nr_soft_scanned;
Andrew Morton179e9632006-03-22 00:08:18 -08002577 struct scan_control sc = {
2578 .gfp_mask = GFP_KERNEL,
Johannes Weinera6dc60f2009-03-31 15:19:30 -07002579 .may_unmap = 1,
KOSAKI Motohiro2e2e4252009-04-21 12:24:57 -07002580 .may_swap = 1,
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08002581 /*
2582	 * kswapd doesn't want to be bailed out while reclaiming, because
2583 * we want to put equal scanning pressure on each zone.
2584 */
2585 .nr_to_reclaim = ULONG_MAX,
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07002586 .order = order,
Johannes Weinerf16015f2012-01-12 17:17:52 -08002587 .target_mem_cgroup = NULL,
Andrew Morton179e9632006-03-22 00:08:18 -08002588 };
Ying Hana09ed5e2011-05-24 17:12:26 -07002589 struct shrink_control shrink = {
2590 .gfp_mask = sc.gfp_mask,
2591 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592loop_again:
2593 total_scanned = 0;
Rik van Riela79311c2009-01-06 14:40:01 -08002594 sc.nr_reclaimed = 0;
Christoph Lameterc0bbbc72006-06-11 15:22:26 -07002595 sc.may_writepage = !laptop_mode;
Christoph Lameterf8891e52006-06-30 01:55:45 -07002596 count_vm_event(PAGEOUTRUN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 unsigned long lru_pages = 0;
KOSAKI Motohirobb3ab592009-12-14 17:58:55 -08002600 int has_under_min_watermark_zone = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601
Rik van Rielf7b7fd82005-11-28 13:44:07 -08002602 /* The swap token gets in the way of swapout... */
2603 if (!priority)
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07002604 disable_swap_token(NULL);
Rik van Rielf7b7fd82005-11-28 13:44:07 -08002605
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606 all_zones_ok = 1;
Mel Gorman1741c872011-01-13 15:46:21 -08002607 balanced = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07002609 /*
2610 * Scan in the highmem->dma direction for the highest
2611 * zone which needs scanning
2612 */
2613 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
2614 struct zone *zone = pgdat->node_zones + i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07002616 if (!populated_zone(zone))
2617 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618
Lisa Du36abcfd2013-09-11 14:22:36 -07002619 if (priority != DEF_PRIORITY &&
2620 !zone_reclaimable(zone))
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07002621 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622
Rik van Riel556adec2008-10-18 20:26:34 -07002623 /*
2624 * Do some background aging of the anon list, to give
2625 * pages a chance to be referenced before reclaiming.
2626 */
Johannes Weinerf16015f2012-01-12 17:17:52 -08002627 age_active_anon(zone, &sc, priority);
Rik van Riel556adec2008-10-18 20:26:34 -07002628
Mel Gormancc715d92012-03-21 16:34:00 -07002629 /*
2630 * If the number of buffer_heads in the machine
2631 * exceeds the maximum allowed level and this node
2632 * has a highmem zone, force kswapd to reclaim from
2633 * it to relieve lowmem pressure.
2634 */
2635 if (buffer_heads_over_limit && is_highmem_idx(i)) {
2636 end_zone = i;
2637 break;
2638 }
2639
Mel Gorman88f5acf2011-01-13 15:45:41 -08002640 if (!zone_watermark_ok_safe(zone, order,
Mel Gorman41858962009-06-16 15:32:12 -07002641 high_wmark_pages(zone), 0, 0)) {
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07002642 end_zone = i;
Andrew Mortone1dbeda2006-12-06 20:32:01 -08002643 break;
Shaohua Li439423f2011-08-25 15:59:12 -07002644 } else {
2645 /* If balanced, clear the congested flag */
2646 zone_clear_flag(zone, ZONE_CONGESTED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648 }
Andrew Mortone1dbeda2006-12-06 20:32:01 -08002649 if (i < 0)
2650 goto out;
2651
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652 for (i = 0; i <= end_zone; i++) {
2653 struct zone *zone = pgdat->node_zones + i;
2654
Wu Fengguangadea02a2009-09-21 17:01:42 -07002655 lru_pages += zone_reclaimable_pages(zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656 }
2657
2658 /*
2659 * Now scan the zone in the dma->highmem direction, stopping
2660 * at the last zone which needs scanning.
2661 *
2662 * We do this because the page allocator works in the opposite
2663 * direction. This prevents the page allocator from allocating
2664 * pages behind kswapd's direction of progress, which would
2665 * cause too much scanning of the lower zones.
2666 */
2667 for (i = 0; i <= end_zone; i++) {
2668 struct zone *zone = pgdat->node_zones + i;
Rik van Rielfe2c2a12012-03-21 16:33:51 -07002669 int nr_slab, testorder;
Mel Gorman8afdcec2011-03-22 16:33:04 -07002670 unsigned long balance_gap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671
Con Kolivasf3fe6512006-01-06 00:11:15 -08002672 if (!populated_zone(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673 continue;
2674
Lisa Du36abcfd2013-09-11 14:22:36 -07002675 if (priority != DEF_PRIORITY &&
2676 !zone_reclaimable(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677 continue;
2678
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 sc.nr_scanned = 0;
Balbir Singh4e416952009-09-23 15:56:39 -07002680
Ying Han0ae5e892011-05-26 16:25:25 -07002681 nr_soft_scanned = 0;
Balbir Singh4e416952009-09-23 15:56:39 -07002682 /*
2683 * Call soft limit reclaim before calling shrink_zone.
Balbir Singh4e416952009-09-23 15:56:39 -07002684 */
Ying Han0ae5e892011-05-26 16:25:25 -07002685 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2686 order, sc.gfp_mask,
2687 &nr_soft_scanned);
2688 sc.nr_reclaimed += nr_soft_reclaimed;
2689 total_scanned += nr_soft_scanned;
KOSAKI Motohiro00918b62010-08-10 18:03:05 -07002690
Rik van Riel32a43302007-10-16 01:24:50 -07002691 /*
Mel Gorman8afdcec2011-03-22 16:33:04 -07002692 * We put equal pressure on every zone, unless
2693 * one zone has way too many pages free
2694 * already. The "too many pages" is defined
2695 * as the high wmark plus a "gap" where the
2696 * gap is either the low watermark or 1%
2697 * of the zone, whichever is smaller.
Rik van Riel32a43302007-10-16 01:24:50 -07002698 */
Mel Gorman8afdcec2011-03-22 16:33:04 -07002699 balance_gap = min(low_wmark_pages(zone),
2700 (zone->present_pages +
2701 KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
2702 KSWAPD_ZONE_BALANCE_GAP_RATIO);
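			/*
			 * Example: with KSWAPD_ZONE_BALANCE_GAP_RATIO of 100,
			 * a 512MB zone (131,072 pages at 4K) gets a gap of
			 * min(low_wmark_pages(zone), 1311) pages, i.e. at most
			 * about 5MB on top of the high watermark.
			 */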
Rik van Rielfe2c2a12012-03-21 16:33:51 -07002703 /*
2704 * Kswapd reclaims only single pages with compaction
2705 * enabled. Trying too hard to reclaim until contiguous
2706 * free pages have become available can hurt performance
2707 * by evicting too much useful data from memory.
2708 * Do not reclaim more than needed for compaction.
2709 */
2710 testorder = order;
2711 if (COMPACTION_BUILD && order &&
2712 compaction_suitable(zone, order) !=
2713 COMPACT_SKIPPED)
2714 testorder = 0;
2715
Mel Gormancc715d92012-03-21 16:34:00 -07002716 if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
Hugh Dickins643ac9f2012-03-23 02:57:31 -07002717 !zone_watermark_ok_safe(zone, testorder,
Mel Gorman8afdcec2011-03-22 16:33:04 -07002718 high_wmark_pages(zone) + balance_gap,
Mel Gormand7868da2011-07-08 15:39:38 -07002719 end_zone, 0)) {
Rik van Riela79311c2009-01-06 14:40:01 -08002720 shrink_zone(priority, zone, &sc);
Andrea Arcangeli5a03b052011-01-13 15:47:11 -08002721
Mel Gormand7868da2011-07-08 15:39:38 -07002722 reclaim_state->reclaimed_slab = 0;
2723 nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
2724 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2725 total_scanned += sc.nr_scanned;
2726
Mel Gormand7868da2011-07-08 15:39:38 -07002727 }
2728
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 /*
2730 * If we've done a decent amount of scanning and
2731 * the reclaim ratio is low, start doing writepage
2732 * even in laptop mode
2733 */
2734 if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
Rik van Riela79311c2009-01-06 14:40:01 -08002735 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 sc.may_writepage = 1;
KOSAKI Motohirobb3ab592009-12-14 17:58:55 -08002737
Lisa Du36abcfd2013-09-11 14:22:36 -07002738 if (!zone_reclaimable(zone)) {
Mel Gorman215ddd62011-07-08 15:39:40 -07002739 if (end_zone && end_zone == i)
2740 end_zone--;
Mel Gormand7868da2011-07-08 15:39:38 -07002741 continue;
Mel Gorman215ddd62011-07-08 15:39:40 -07002742 }
Mel Gormand7868da2011-07-08 15:39:38 -07002743
Rik van Rielfe2c2a12012-03-21 16:33:51 -07002744 if (!zone_watermark_ok_safe(zone, testorder,
Minchan Kim45973d72010-03-05 13:41:45 -08002745 high_wmark_pages(zone), end_zone, 0)) {
2746 all_zones_ok = 0;
2747 /*
2748	 * We are still under the min watermark. This
2749 * means that we have a GFP_ATOMIC allocation
2750 * failure risk. Hurry up!
2751 */
Mel Gorman88f5acf2011-01-13 15:45:41 -08002752 if (!zone_watermark_ok_safe(zone, order,
Minchan Kim45973d72010-03-05 13:41:45 -08002753 min_wmark_pages(zone), end_zone, 0))
2754 has_under_min_watermark_zone = 1;
Mel Gorman0e093d992010-10-26 14:21:45 -07002755 } else {
2756 /*
2757 * If a zone reaches its high watermark,
2758 * consider it to be no longer congested. It's
2759 * possible there are dirty pages backed by
2760 * congested BDIs but as pressure is relieved,
2761 * spectulatively avoid congestion waits
2762	 * speculatively avoid congestion waits
2763 zone_clear_flag(zone, ZONE_CONGESTED);
Mel Gormandc83edd2011-01-13 15:46:26 -08002764 if (i <= *classzone_idx)
Mel Gorman1741c872011-01-13 15:46:21 -08002765 balanced += zone->present_pages;
Minchan Kim45973d72010-03-05 13:41:45 -08002766 }
KOSAKI Motohirobb3ab592009-12-14 17:58:55 -08002767
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 }
Mel Gormandc83edd2011-01-13 15:46:26 -08002769 if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 break; /* kswapd: all done */
2771 /*
2772 * OK, kswapd is getting into trouble. Take a nap, then take
2773 * another pass across the zones.
2774 */
KOSAKI Motohirobb3ab592009-12-14 17:58:55 -08002775 if (total_scanned && (priority < DEF_PRIORITY - 2)) {
2776 if (has_under_min_watermark_zone)
2777 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2778 else
2779 congestion_wait(BLK_RW_ASYNC, HZ/10);
2780 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781
2782 /*
2783 * We do this so kswapd doesn't build up large priorities for
2784 * example when it is freeing in parallel with allocators. It
2785 * matches the direct reclaim path behaviour in terms of impact
2786 * on zone->*_priority.
2787 */
Rik van Riela79311c2009-01-06 14:40:01 -08002788 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789 break;
2790 }
2791out:
Mel Gorman99504742011-01-13 15:46:20 -08002792
2793 /*
2794 * order-0: All zones must meet high watermark for a balanced node
Mel Gorman1741c872011-01-13 15:46:21 -08002795 * high-order: Balanced zones must make up at least 25% of the node
2796 * for the node to be balanced
Mel Gorman99504742011-01-13 15:46:20 -08002797 */
Mel Gormandc83edd2011-01-13 15:46:26 -08002798 if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 cond_resched();
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002800
2801 try_to_freeze();
2802
KOSAKI Motohiro73ce02e2009-01-06 14:40:33 -08002803 /*
2804 * Fragmentation may mean that the system cannot be
2805 * rebalanced for high-order allocations in all zones.
2806 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
2807 * it means the zones have been fully scanned and are still
2808 * not balanced. For high-order allocations, there is
2809 * little point trying all over again as kswapd may
2810 * infinite loop.
2811	 * loop infinitely.
2812 * Instead, recheck all watermarks at order-0 as they
2813 * are the most important. If watermarks are ok, kswapd will go
2814 * back to sleep. High-order users can still perform direct
2815 * reclaim if they wish.
2816 */
2817 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
2818 order = sc.order = 0;
2819
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820 goto loop_again;
2821 }
2822
Mel Gorman99504742011-01-13 15:46:20 -08002823 /*
2824 * If kswapd was reclaiming at a higher order, it has the option of
2825 * sleeping without all zones being balanced. Before it does, it must
2826 * ensure that the watermarks for order-0 on *all* zones are met and
2827 * that the congestion flags are cleared. The congestion flag must
2828 * be cleared as kswapd is the only mechanism that clears the flag
2829 * and it is potentially going to sleep here.
2830 */
2831 if (order) {
Rik van Riel7be62de2012-03-21 16:33:52 -07002832 int zones_need_compaction = 1;
2833
Mel Gorman99504742011-01-13 15:46:20 -08002834 for (i = 0; i <= end_zone; i++) {
2835 struct zone *zone = pgdat->node_zones + i;
2836
2837 if (!populated_zone(zone))
2838 continue;
2839
Lisa Du36abcfd2013-09-11 14:22:36 -07002840 if (priority != DEF_PRIORITY &&
2841 !zone_reclaimable(zone))
Mel Gorman99504742011-01-13 15:46:20 -08002842 continue;
2843
Rik van Rielfe2c2a12012-03-21 16:33:51 -07002844 /* Would compaction fail due to lack of free memory? */
Rik van Riel496b9192012-03-24 10:26:21 -04002845 if (COMPACTION_BUILD &&
2846 compaction_suitable(zone, order) == COMPACT_SKIPPED)
Rik van Rielfe2c2a12012-03-21 16:33:51 -07002847 goto loop_again;
2848
Mel Gorman99504742011-01-13 15:46:20 -08002849 /* Confirm the zone is balanced for order-0 */
2850 if (!zone_watermark_ok(zone, 0,
2851 high_wmark_pages(zone), 0, 0)) {
2852 order = sc.order = 0;
2853 goto loop_again;
2854 }
2855
Rik van Riel7be62de2012-03-21 16:33:52 -07002856 /* Check if the memory needs to be defragmented. */
2857 if (zone_watermark_ok(zone, order,
2858 low_wmark_pages(zone), *classzone_idx, 0))
2859 zones_need_compaction = 0;
2860
Mel Gorman99504742011-01-13 15:46:20 -08002861 /* If balanced, clear the congested flag */
2862 zone_clear_flag(zone, ZONE_CONGESTED);
2863 }
Rik van Riel7be62de2012-03-21 16:33:52 -07002864
2865 if (zones_need_compaction)
2866 compact_pgdat(pgdat, order);
Mel Gorman99504742011-01-13 15:46:20 -08002867 }
2868
Mel Gorman0abdee22011-01-13 15:46:22 -08002869 /*
2870 * Return the order we were reclaiming at so sleeping_prematurely()
2871 * makes a decision on the order we were last reclaiming at. However,
2872 * if another caller entered the allocator slow path while kswapd
2873 * was awake, order will remain at the higher level
2874 */
Mel Gormandc83edd2011-01-13 15:46:26 -08002875 *classzone_idx = end_zone;
Mel Gorman0abdee22011-01-13 15:46:22 -08002876 return order;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877}
2878
Mel Gormandc83edd2011-01-13 15:46:26 -08002879static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08002880{
2881 long remaining = 0;
2882 DEFINE_WAIT(wait);
2883
2884 if (freezing(current) || kthread_should_stop())
2885 return;
2886
2887 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2888
2889 /* Try to sleep for a short interval */
Mel Gormandc83edd2011-01-13 15:46:26 -08002890 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08002891 remaining = schedule_timeout(HZ/10);
2892 finish_wait(&pgdat->kswapd_wait, &wait);
2893 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2894 }
2895
2896 /*
2897 * After a short sleep, check if it was a premature sleep. If not, then
2898 * go fully to sleep until explicitly woken up.
2899 */
Mel Gormandc83edd2011-01-13 15:46:26 -08002900 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08002901 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
2902
2903 /*
2904 * vmstat counters are not perfectly accurate and the estimated
2905 * value for counters such as NR_FREE_PAGES can deviate from the
2906 * true value by nr_online_cpus * threshold. To avoid the zone
2907 * watermarks being breached while under pressure, we reduce the
2908 * per-cpu vmstat threshold while kswapd is awake and restore
2909 * them before going back to sleep.
2910 */
2911 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
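		/*
		 * For example, on a 16-CPU node with a per-cpu threshold of
		 * around 125 (a typical calculate_normal_threshold() cap),
		 * NR_FREE_PAGES may be misread by up to 16 * 125 = 2000 pages,
		 * roughly 8MB with 4K pages, which is why the smaller pressure
		 * threshold is used while kswapd is awake.
		 */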
Aaditya Kumar07aa7012012-07-17 15:48:07 -07002912
2913 if (!kthread_should_stop())
2914 schedule();
2915
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08002916 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
2917 } else {
2918 if (remaining)
2919 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
2920 else
2921 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
2922 }
2923 finish_wait(&pgdat->kswapd_wait, &wait);
2924}
2925
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926/*
2927 * The background pageout daemon, started as a kernel thread
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002928 * from the init process.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929 *
2930 * This basically trickles out pages so that we have _some_
2931 * free memory available even if there is no other activity
2932 * that frees anything up. This is needed for things like routing
2933 * etc, where we otherwise might have all activity going on in
2934 * asynchronous contexts that cannot page things out.
2935 *
2936 * If there are applications that are active memory-allocators
2937 * (most normal use), this basically shouldn't matter.
2938 */
2939static int kswapd(void *p)
2940{
Mel Gorman215ddd62011-07-08 15:39:40 -07002941 unsigned long order, new_order;
Alex,Shid2ebd0f62011-10-31 17:08:39 -07002942 unsigned balanced_order;
Mel Gorman215ddd62011-07-08 15:39:40 -07002943 int classzone_idx, new_classzone_idx;
Alex,Shid2ebd0f62011-10-31 17:08:39 -07002944 int balanced_classzone_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945 pg_data_t *pgdat = (pg_data_t*)p;
2946 struct task_struct *tsk = current;
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08002947
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948 struct reclaim_state reclaim_state = {
2949 .reclaimed_slab = 0,
2950 };
Rusty Russella70f7302009-03-13 14:49:46 +10302951 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952
Nick Piggincf40bd12009-01-21 08:12:39 +01002953 lockdep_set_current_reclaim_state(GFP_KERNEL);
2954
Rusty Russell174596a2009-01-01 10:12:29 +10302955 if (!cpumask_empty(cpumask))
Mike Travisc5f59f02008-04-04 18:11:10 -07002956 set_cpus_allowed_ptr(tsk, cpumask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 current->reclaim_state = &reclaim_state;
2958
2959 /*
2960 * Tell the memory management that we're a "memory allocator",
2961 * and that if we need more memory we should get access to it
2962 * regardless (see "__alloc_pages()"). "kswapd" should
2963 * never get caught in the normal page freeing logic.
2964 *
2965 * (Kswapd normally doesn't need memory anyway, but sometimes
2966 * you need a small amount of memory in order to be able to
2967 * page out something else, and this flag essentially protects
2968 * us from recursively trying to free more memory as we're
2969 * trying to free the first piece of memory in the first place).
2970 */
Christoph Lameter930d9152006-01-08 01:00:47 -08002971 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
Rafael J. Wysocki83144182007-07-17 04:03:35 -07002972 set_freezable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973
Mel Gorman215ddd62011-07-08 15:39:40 -07002974 order = new_order = 0;
Alex,Shid2ebd0f62011-10-31 17:08:39 -07002975 balanced_order = 0;
Mel Gorman215ddd62011-07-08 15:39:40 -07002976 classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
Alex,Shid2ebd0f62011-10-31 17:08:39 -07002977 balanced_classzone_idx = classzone_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978 for ( ; ; ) {
David Rientjes8fe23e02009-12-14 17:58:33 -08002979 int ret;
Christoph Lameter3e1d1d22005-06-24 23:13:50 -07002980
Mel Gorman215ddd62011-07-08 15:39:40 -07002981 /*
2982 * If the last balance_pgdat was unsuccessful it's unlikely a
2983 * new request of a similar or harder type will succeed soon
2984	 * so consider going to sleep based on the order we last reclaimed at
2985 */
Alex,Shid2ebd0f62011-10-31 17:08:39 -07002986 if (balanced_classzone_idx >= new_classzone_idx &&
2987 balanced_order == new_order) {
Mel Gorman215ddd62011-07-08 15:39:40 -07002988 new_order = pgdat->kswapd_max_order;
2989 new_classzone_idx = pgdat->classzone_idx;
2990 pgdat->kswapd_max_order = 0;
2991 pgdat->classzone_idx = pgdat->nr_zones - 1;
2992 }
2993
Mel Gorman99504742011-01-13 15:46:20 -08002994 if (order < new_order || classzone_idx > new_classzone_idx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002995 /*
2996 * Don't sleep if someone wants a larger 'order'
Mel Gorman99504742011-01-13 15:46:20 -08002997	 * allocation or has tighter zone constraints
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998 */
2999 order = new_order;
Mel Gorman99504742011-01-13 15:46:20 -08003000 classzone_idx = new_classzone_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001 } else {
Alex,Shid2ebd0f62011-10-31 17:08:39 -07003002 kswapd_try_to_sleep(pgdat, balanced_order,
3003 balanced_classzone_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 order = pgdat->kswapd_max_order;
Mel Gorman99504742011-01-13 15:46:20 -08003005 classzone_idx = pgdat->classzone_idx;
Alex,Shif0dfcde2011-10-31 17:08:45 -07003006 new_order = order;
3007 new_classzone_idx = classzone_idx;
Mel Gorman4d405022011-01-13 15:46:23 -08003008 pgdat->kswapd_max_order = 0;
Mel Gorman215ddd62011-07-08 15:39:40 -07003009 pgdat->classzone_idx = pgdat->nr_zones - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011
David Rientjes8fe23e02009-12-14 17:58:33 -08003012 ret = try_to_freeze();
3013 if (kthread_should_stop())
3014 break;
3015
3016 /*
3017 * We can speed up thawing tasks if we don't call balance_pgdat
3018 * after returning from the refrigerator
3019 */
Mel Gorman33906bc2010-08-09 17:19:16 -07003020 if (!ret) {
3021 trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
Alex,Shid2ebd0f62011-10-31 17:08:39 -07003022 balanced_classzone_idx = classzone_idx;
3023 balanced_order = balance_pgdat(pgdat, order,
3024 &balanced_classzone_idx);
Mel Gorman33906bc2010-08-09 17:19:16 -07003025 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026 }
Takamori Yamaguchi3f874ec2012-11-08 15:53:39 -08003027
3028 current->reclaim_state = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029 return 0;
3030}
3031
3032/*
3033 * A zone is low on free memory, so wake its kswapd task to service it.
3034 */
Mel Gorman99504742011-01-13 15:46:20 -08003035void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036{
3037 pg_data_t *pgdat;
3038
Con Kolivasf3fe6512006-01-06 00:11:15 -08003039 if (!populated_zone(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040 return;
3041
Paul Jackson02a0e532006-12-13 00:34:25 -08003042 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003043 return;
Mel Gorman88f5acf2011-01-13 15:45:41 -08003044 pgdat = zone->zone_pgdat;
Mel Gorman99504742011-01-13 15:46:20 -08003045 if (pgdat->kswapd_max_order < order) {
Mel Gorman88f5acf2011-01-13 15:45:41 -08003046 pgdat->kswapd_max_order = order;
Mel Gorman99504742011-01-13 15:46:20 -08003047 pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
3048 }
Con Kolivas8d0986e2005-09-13 01:25:07 -07003049 if (!waitqueue_active(&pgdat->kswapd_wait))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003050 return;
Mel Gorman88f5acf2011-01-13 15:45:41 -08003051 if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
3052 return;
3053
3054 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
Con Kolivas8d0986e2005-09-13 01:25:07 -07003055 wake_up_interruptible(&pgdat->kswapd_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003056}
3057
Wu Fengguangadea02a2009-09-21 17:01:42 -07003058/*
3059 * The reclaimable count would be mostly accurate.
3060	 * The less readily reclaimable pages include
3061	 * - mlocked pages, which will be moved to unevictable list when encountered
3062	 * - mapped pages, which may require several passes to be reclaimed
3063	 * - dirty pages, which are not "instantly" reclaimable
3064 */
3065unsigned long global_reclaimable_pages(void)
Rik van Riel4f98a2f2008-10-18 20:26:32 -07003066{
Wu Fengguangadea02a2009-09-21 17:01:42 -07003067 int nr;
3068
3069 nr = global_page_state(NR_ACTIVE_FILE) +
3070 global_page_state(NR_INACTIVE_FILE);
3071
Shaohua Lid1c2fbe2013-02-22 16:34:38 -08003072 if (get_nr_swap_pages() > 0)
Wu Fengguangadea02a2009-09-21 17:01:42 -07003073 nr += global_page_state(NR_ACTIVE_ANON) +
3074 global_page_state(NR_INACTIVE_ANON);
3075
3076 return nr;
3077}
3078
Rik van Riel4f98a2f2008-10-18 20:26:32 -07003079
Rafael J. Wysockic6f37f12009-05-24 22:16:31 +02003080#ifdef CONFIG_HIBERNATION
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081/*
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003082 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07003083 * freed pages.
3084 *
3085 * Rather than trying to age LRUs the aim is to preserve the overall
3086 * LRU order by reclaiming preferentially
3087 * inactive > active > active referenced > active mapped
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088 */
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003089unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090{
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07003091 struct reclaim_state reclaim_state;
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07003092 struct scan_control sc = {
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003093 .gfp_mask = GFP_HIGHUSER_MOVABLE,
3094 .may_swap = 1,
3095 .may_unmap = 1,
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07003096 .may_writepage = 1,
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003097 .nr_to_reclaim = nr_to_reclaim,
3098 .hibernation_mode = 1,
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003099 .order = 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100 };
Ying Hana09ed5e2011-05-24 17:12:26 -07003101 struct shrink_control shrink = {
3102 .gfp_mask = sc.gfp_mask,
3103 };
3104 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003105 struct task_struct *p = current;
3106 unsigned long nr_reclaimed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003107
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003108 p->flags |= PF_MEMALLOC;
3109 lockdep_set_current_reclaim_state(sc.gfp_mask);
3110 reclaim_state.reclaimed_slab = 0;
3111 p->reclaim_state = &reclaim_state;
Andrew Morton69e05942006-03-22 00:08:19 -08003112
Ying Hana09ed5e2011-05-24 17:12:26 -07003113 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07003114
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003115 p->reclaim_state = NULL;
3116 lockdep_clear_current_reclaim_state();
3117 p->flags &= ~PF_MEMALLOC;
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07003118
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003119 return nr_reclaimed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120}
Rafael J. Wysockic6f37f12009-05-24 22:16:31 +02003121#endif /* CONFIG_HIBERNATION */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003122
Linus Torvalds1da177e2005-04-16 15:20:36 -07003123/* It's optimal to keep kswapds on the same CPUs as their memory, but
3124 not required for correctness. So if the last cpu in a node goes
3125 away, we get changed to run anywhere: as the first one comes back,
3126 restore their cpu bindings. */
Chandra Seetharaman9c7b2162006-06-27 02:54:07 -07003127static int __devinit cpu_callback(struct notifier_block *nfb,
Andrew Morton69e05942006-03-22 00:08:19 -08003128 unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129{
Yasunori Goto58c0a4a2007-10-16 01:25:40 -07003130 int nid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003131
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003132 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
Yasunori Goto58c0a4a2007-10-16 01:25:40 -07003133 for_each_node_state(nid, N_HIGH_MEMORY) {
Mike Travisc5f59f02008-04-04 18:11:10 -07003134 pg_data_t *pgdat = NODE_DATA(nid);
Rusty Russella70f7302009-03-13 14:49:46 +10303135 const struct cpumask *mask;
3136
3137 mask = cpumask_of_node(pgdat->node_id);
Mike Travisc5f59f02008-04-04 18:11:10 -07003138
Rusty Russell3e597942009-01-01 10:12:24 +10303139 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003140 /* One of our CPUs online: restore mask */
Mike Travisc5f59f02008-04-04 18:11:10 -07003141 set_cpus_allowed_ptr(pgdat->kswapd, mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 }
3143 }
3144 return NOTIFY_OK;
3145}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003146
Yasunori Goto3218ae12006-06-27 02:53:33 -07003147/*
3148 * This kswapd start function will be called by init and node-hot-add.
3149	 * On node-hot-add, kswapd will be moved to proper cpus if cpus are hot-added.
3150 */
3151int kswapd_run(int nid)
3152{
3153 pg_data_t *pgdat = NODE_DATA(nid);
3154 int ret = 0;
3155
3156 if (pgdat->kswapd)
3157 return 0;
3158
3159 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
3160 if (IS_ERR(pgdat->kswapd)) {
3161 /* failure at boot is fatal */
3162 BUG_ON(system_state == SYSTEM_BOOTING);
3163	printk("Failed to start kswapd on node %d\n", nid);
3164 ret = -1;
3165 }
3166 return ret;
3167}
3168
David Rientjes8fe23e02009-12-14 17:58:33 -08003169/*
Jiang Liu0e343db2012-07-11 14:01:52 -07003170 * Called by memory hotplug when all memory in a node is offlined. Caller must
3171 * hold lock_memory_hotplug().
David Rientjes8fe23e02009-12-14 17:58:33 -08003172 */
3173void kswapd_stop(int nid)
3174{
3175 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
3176
Jiang Liu0e343db2012-07-11 14:01:52 -07003177 if (kswapd) {
David Rientjes8fe23e02009-12-14 17:58:33 -08003178 kthread_stop(kswapd);
Jiang Liu0e343db2012-07-11 14:01:52 -07003179 NODE_DATA(nid)->kswapd = NULL;
3180 }
David Rientjes8fe23e02009-12-14 17:58:33 -08003181}
3182
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183static int __init kswapd_init(void)
3184{
Yasunori Goto3218ae12006-06-27 02:53:33 -07003185 int nid;
Andrew Morton69e05942006-03-22 00:08:19 -08003186
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187 swap_setup();
Christoph Lameter9422ffb2007-10-16 01:25:31 -07003188 for_each_node_state(nid, N_HIGH_MEMORY)
Yasunori Goto3218ae12006-06-27 02:53:33 -07003189 kswapd_run(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190 hotcpu_notifier(cpu_callback, 0);
3191 return 0;
3192}
3193
3194module_init(kswapd_init)
Christoph Lameter9eeff232006-01-18 17:42:31 -08003195
3196#ifdef CONFIG_NUMA
3197/*
3198 * Zone reclaim mode
3199 *
3200 * If non-zero call zone_reclaim when the number of free pages falls below
3201 * the watermarks.
Christoph Lameter9eeff232006-01-18 17:42:31 -08003202 */
3203int zone_reclaim_mode __read_mostly;
3204
Christoph Lameter1b2ffb72006-02-01 03:05:34 -08003205#define RECLAIM_OFF 0
Fernando Luis Vazquez Cao7d034312008-07-29 22:33:41 -07003206#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
Christoph Lameter1b2ffb72006-02-01 03:05:34 -08003207#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
3208#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
3209
Christoph Lameter9eeff232006-01-18 17:42:31 -08003210/*
Christoph Lametera92f7122006-02-01 03:05:32 -08003211 * Priority for ZONE_RECLAIM. This determines the fraction of pages
3212 * of a node considered for each zone_reclaim. 4 scans 1/16th of
3213 * a zone.
3214 */
3215#define ZONE_RECLAIM_PRIORITY 4
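/*
 * At priority p the LRU scan target is roughly the list size >> p, so the
 * first pass at priority 4 looks at about 1/16th of a zone; __zone_reclaim()
 * then lowers the priority towards 0 until enough pages have been reclaimed.
 */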
3216
Christoph Lameter9eeff232006-01-18 17:42:31 -08003217/*
Christoph Lameter96146342006-07-03 00:24:13 -07003218 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
3219 * occur.
3220 */
3221int sysctl_min_unmapped_ratio = 1;
3222
3223/*
Christoph Lameter0ff38492006-09-25 23:31:52 -07003224 * If the number of slab pages in a zone grows beyond this percentage then
3225 * slab reclaim needs to occur.
3226 */
3227int sysctl_min_slab_ratio = 5;
3228
Mel Gorman90afa5d2009-06-16 15:33:20 -07003229static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
3230{
3231 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
3232 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
3233 zone_page_state(zone, NR_ACTIVE_FILE);
3234
3235 /*
3236 * It's possible for there to be more file mapped pages than
3237 * accounted for by the pages on the file LRU lists because
3238 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
3239 */
3240 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
3241}
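/*
 * Example: with 5000 pages on the file LRUs of which 3000 are mapped, this
 * reports 2000 reclaimable unmapped file pages. If heavy tmpfs use pushes
 * NR_FILE_MAPPED above the file LRU total, the helper returns 0 rather than
 * underflowing.
 */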
3242
3243/* Work out how many page cache pages we can reclaim in this reclaim_mode */
3244static long zone_pagecache_reclaimable(struct zone *zone)
3245{
3246 long nr_pagecache_reclaimable;
3247 long delta = 0;
3248
3249 /*
3250 * If RECLAIM_SWAP is set, then all file pages are considered
3251 * potentially reclaimable. Otherwise, we have to worry about
3252 * pages like swapcache and zone_unmapped_file_pages() provides
3253 * a better estimate
3254 */
3255 if (zone_reclaim_mode & RECLAIM_SWAP)
3256 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
3257 else
3258 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
3259
3260 /* If we can't clean pages, remove dirty pages from consideration */
3261 if (!(zone_reclaim_mode & RECLAIM_WRITE))
3262 delta += zone_page_state(zone, NR_FILE_DIRTY);
3263
3264 /* Watch for any possible underflows due to delta */
3265 if (unlikely(delta > nr_pagecache_reclaimable))
3266 delta = nr_pagecache_reclaimable;
3267
3268 return nr_pagecache_reclaimable - delta;
3269}
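/*
 * Example: with zone_reclaim_mode allowing neither RECLAIM_SWAP nor
 * RECLAIM_WRITE, a zone with 10,000 unmapped file pages and 3,000 dirty
 * file pages is reported as having 7,000 reclaimable page cache pages.
 */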
3270
Christoph Lameter0ff38492006-09-25 23:31:52 -07003271/*
Christoph Lameter9eeff232006-01-18 17:42:31 -08003272 * Try to free up some pages from this zone through reclaim.
3273 */
Andrew Morton179e9632006-03-22 00:08:18 -08003274static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
Christoph Lameter9eeff232006-01-18 17:42:31 -08003275{
Christoph Lameter7fb2d462006-03-22 00:08:22 -08003276 /* Minimum pages needed in order to stay on node */
Andrew Morton69e05942006-03-22 00:08:19 -08003277 const unsigned long nr_pages = 1 << order;
Christoph Lameter9eeff232006-01-18 17:42:31 -08003278 struct task_struct *p = current;
3279 struct reclaim_state reclaim_state;
Christoph Lameter86959492006-03-22 00:08:18 -08003280 int priority;
Andrew Morton179e9632006-03-22 00:08:18 -08003281 struct scan_control sc = {
3282 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
Johannes Weinera6dc60f2009-03-31 15:19:30 -07003283 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
KOSAKI Motohiro2e2e4252009-04-21 12:24:57 -07003284 .may_swap = 1,
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08003285 .nr_to_reclaim = max_t(unsigned long, nr_pages,
3286 SWAP_CLUSTER_MAX),
Andrew Morton179e9632006-03-22 00:08:18 -08003287 .gfp_mask = gfp_mask,
Johannes Weinerbd2f6192009-03-31 15:19:38 -07003288 .order = order,
Andrew Morton179e9632006-03-22 00:08:18 -08003289 };
Ying Hana09ed5e2011-05-24 17:12:26 -07003290 struct shrink_control shrink = {
3291 .gfp_mask = sc.gfp_mask,
3292 };
KOSAKI Motohiro15748042010-08-09 17:19:50 -07003293 unsigned long nr_slab_pages0, nr_slab_pages1;
Christoph Lameter9eeff232006-01-18 17:42:31 -08003294
Christoph Lameter9eeff232006-01-18 17:42:31 -08003295 cond_resched();
Christoph Lameterd4f77962006-02-24 13:04:22 -08003296 /*
3297 * We need to be able to allocate from the reserves for RECLAIM_SWAP
3298 * and we also need to be able to write out pages for RECLAIM_WRITE
3299 * and RECLAIM_SWAP.
3300 */
3301 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
KOSAKI Motohiro76ca5422010-03-05 13:41:47 -08003302 lockdep_set_current_reclaim_state(gfp_mask);
Christoph Lameter9eeff232006-01-18 17:42:31 -08003303 reclaim_state.reclaimed_slab = 0;
3304 p->reclaim_state = &reclaim_state;
Christoph Lameterc84db232006-02-01 03:05:29 -08003305
Mel Gorman90afa5d2009-06-16 15:33:20 -07003306 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
Christoph Lameter0ff38492006-09-25 23:31:52 -07003307 /*
3308 * Free memory by calling shrink zone with increasing
3309 * priorities until we have enough memory freed.
3310 */
3311 priority = ZONE_RECLAIM_PRIORITY;
3312 do {
Rik van Riela79311c2009-01-06 14:40:01 -08003313 shrink_zone(priority, zone, &sc);
Christoph Lameter0ff38492006-09-25 23:31:52 -07003314 priority--;
Rik van Riela79311c2009-01-06 14:40:01 -08003315 } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
Christoph Lameter0ff38492006-09-25 23:31:52 -07003316 }
Christoph Lameterc84db232006-02-01 03:05:29 -08003317
KOSAKI Motohiro15748042010-08-09 17:19:50 -07003318 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3319 if (nr_slab_pages0 > zone->min_slab_pages) {
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08003320 /*
Christoph Lameter7fb2d462006-03-22 00:08:22 -08003321 * shrink_slab() does not currently allow us to determine how
Christoph Lameter0ff38492006-09-25 23:31:52 -07003322 * many pages were freed in this zone. So we take the current
3323 * number of slab pages and shake the slab until it is reduced
3324 * by the same nr_pages that we used for reclaiming unmapped
3325 * pages.
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08003326 *
Christoph Lameter0ff38492006-09-25 23:31:52 -07003327 * Note that shrink_slab will free memory on all zones and may
3328 * take a long time.
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08003329 */
KOSAKI Motohiro4dc4b3d2010-08-09 17:19:54 -07003330 for (;;) {
3331 unsigned long lru_pages = zone_reclaimable_pages(zone);
3332
3333 /* No reclaimable slab or very low memory pressure */
Ying Han1495f232011-05-24 17:12:27 -07003334 if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
KOSAKI Motohiro4dc4b3d2010-08-09 17:19:54 -07003335 break;
3336
3337 /* Freed enough memory */
3338 nr_slab_pages1 = zone_page_state(zone,
3339 NR_SLAB_RECLAIMABLE);
3340 if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
3341 break;
3342 }
Christoph Lameter83e33a42006-09-25 23:31:53 -07003343
3344 /*
3345 * Update nr_reclaimed by the number of slab pages we
3346 * reclaimed from this zone.
3347 */
KOSAKI Motohiro15748042010-08-09 17:19:50 -07003348 nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3349 if (nr_slab_pages1 < nr_slab_pages0)
3350 sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08003351 }
3352
Christoph Lameter9eeff232006-01-18 17:42:31 -08003353 p->reclaim_state = NULL;
Christoph Lameterd4f77962006-02-24 13:04:22 -08003354 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
KOSAKI Motohiro76ca5422010-03-05 13:41:47 -08003355 lockdep_clear_current_reclaim_state();
Rik van Riela79311c2009-01-06 14:40:01 -08003356 return sc.nr_reclaimed >= nr_pages;
Christoph Lameter9eeff232006-01-18 17:42:31 -08003357}
Andrew Morton179e9632006-03-22 00:08:18 -08003358
3359int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3360{
Andrew Morton179e9632006-03-22 00:08:18 -08003361 int node_id;
David Rientjesd773ed62007-10-16 23:26:01 -07003362 int ret;
Andrew Morton179e9632006-03-22 00:08:18 -08003363
3364 /*
Christoph Lameter0ff38492006-09-25 23:31:52 -07003365 * Zone reclaim reclaims unmapped file backed pages and
3366 * slab pages if we are over the defined limits.
Christoph Lameter34aa1332006-06-30 01:55:37 -07003367 *
Christoph Lameter96146342006-07-03 00:24:13 -07003368 * A small portion of unmapped file backed pages is needed for
3369 * file I/O otherwise pages read by file I/O will be immediately
3370 * thrown out if the zone is overallocated. So we do not reclaim
3371 * if less than a specified percentage of the zone is used by
3372 * unmapped file backed pages.
Andrew Morton179e9632006-03-22 00:08:18 -08003373 */
Mel Gorman90afa5d2009-06-16 15:33:20 -07003374 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
3375 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
Mel Gormanfa5e0842009-06-16 15:33:22 -07003376 return ZONE_RECLAIM_FULL;
Andrew Morton179e9632006-03-22 00:08:18 -08003377
Lisa Du36abcfd2013-09-11 14:22:36 -07003378 if (!zone_reclaimable(zone))
Mel Gormanfa5e0842009-06-16 15:33:22 -07003379 return ZONE_RECLAIM_FULL;
David Rientjesd773ed62007-10-16 23:26:01 -07003380
Andrew Morton179e9632006-03-22 00:08:18 -08003381 /*
David Rientjesd773ed62007-10-16 23:26:01 -07003382 * Do not scan if the allocation should not be delayed.
Andrew Morton179e9632006-03-22 00:08:18 -08003383 */
David Rientjesd773ed62007-10-16 23:26:01 -07003384 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
Mel Gormanfa5e0842009-06-16 15:33:22 -07003385 return ZONE_RECLAIM_NOSCAN;
Andrew Morton179e9632006-03-22 00:08:18 -08003386
3387 /*
3388 * Only run zone reclaim on the local zone or on zones that do not
3389 * have associated processors. This will favor the local processor
3390 * over remote processors and spread off node memory allocations
3391 * as wide as possible.
3392 */
Christoph Lameter89fa3022006-09-25 23:31:55 -07003393 node_id = zone_to_nid(zone);
Christoph Lameter37c07082007-10-16 01:25:36 -07003394 if (node_state(node_id, N_CPU) && node_id != numa_node_id())
Mel Gormanfa5e0842009-06-16 15:33:22 -07003395 return ZONE_RECLAIM_NOSCAN;
David Rientjesd773ed62007-10-16 23:26:01 -07003396
3397 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
Mel Gormanfa5e0842009-06-16 15:33:22 -07003398 return ZONE_RECLAIM_NOSCAN;
3399
David Rientjesd773ed62007-10-16 23:26:01 -07003400 ret = __zone_reclaim(zone, gfp_mask, order);
3401 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
3402
Mel Gorman24cf725182009-06-16 15:33:23 -07003403 if (!ret)
3404 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
3405
David Rientjesd773ed62007-10-16 23:26:01 -07003406 return ret;
Andrew Morton179e9632006-03-22 00:08:18 -08003407}
Christoph Lameter9eeff232006-01-18 17:42:31 -08003408#endif
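
/*
 * For context, a simplified sketch (not the exact upstream code) of how the
 * page allocator in mm/page_alloc.c reacts to the ZONE_RECLAIM_* return
 * values when a zone fails its watermark check:
 *
 *	ret = zone_reclaim(zone, gfp_mask, order);
 *	switch (ret) {
 *	case ZONE_RECLAIM_NOSCAN:	did not scan
 *	case ZONE_RECLAIM_FULL:		scanned but nothing reclaimable
 *		continue;
 *	default:
 *		recheck the watermark before allocating from this zone
 *		if (zone_watermark_ok(zone, order, mark,
 *				      classzone_idx, alloc_flags))
 *			goto try_this_zone;
 *	}
 */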
Lee Schermerhorn894bc312008-10-18 20:26:39 -07003409
Lee Schermerhorn894bc312008-10-18 20:26:39 -07003410/*
3411 * page_evictable - test whether a page is evictable
3412 * @page: the page to test
3413 * @vma: the VMA in which the page is or will be mapped, may be NULL
3414 *
3415 * Test whether page is evictable--i.e., should be placed on active/inactive
Nick Pigginb291f002008-10-18 20:26:44 -07003416 * lists vs unevictable list. The vma argument is !NULL when called from the
3417 * fault path to determine how to instantiate a new page.
Lee Schermerhorn894bc312008-10-18 20:26:39 -07003418 *
3419 * Reasons page might not be evictable:
Lee Schermerhornba9ddf42008-10-18 20:26:42 -07003420 * (1) page's mapping marked unevictable
Nick Pigginb291f002008-10-18 20:26:44 -07003421 * (2) page is part of an mlocked VMA
Lee Schermerhornba9ddf42008-10-18 20:26:42 -07003422 *
Lee Schermerhorn894bc312008-10-18 20:26:39 -07003423 */
3424int page_evictable(struct page *page, struct vm_area_struct *vma)
3425{
3426
Lee Schermerhornba9ddf42008-10-18 20:26:42 -07003427 if (mapping_unevictable(page_mapping(page)))
3428 return 0;
3429
Nick Pigginb291f002008-10-18 20:26:44 -07003430 if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
3431 return 0;
Lee Schermerhorn894bc312008-10-18 20:26:39 -07003432
3433 return 1;
3434}
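
/*
 * Usage sketch (simplified, in the spirit of putback_lru_page() earlier in
 * this file): the reclaim putback path calls this with vma == NULL to pick
 * the destination list, roughly:
 *
 *	if (page_evictable(page, NULL))
 *		lru_cache_add_lru(page, page_lru_base_type(page));
 *	else
 *		add_page_to_unevictable_list(page);
 *
 * The fault path instead passes the faulting vma so that a page being
 * instantiated into an mlocked VMA can be placed straight onto the
 * unevictable list.
 */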
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003435
Hugh Dickins85046572012-01-20 14:34:19 -08003436#ifdef CONFIG_SHMEM
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003437/**
Hugh Dickins24513262012-01-20 14:34:21 -08003438 * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
3439 * @pages: array of pages to check
3440 * @nr_pages: number of pages to check
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003441 *
Hugh Dickins24513262012-01-20 14:34:21 -08003442 * Checks pages for evictability and moves them to the appropriate lru list.
Hugh Dickins85046572012-01-20 14:34:19 -08003443 *
3444 * This function is only used for SysV IPC SHM_UNLOCK.
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003445 */
Hugh Dickins24513262012-01-20 14:34:21 -08003446void check_move_unevictable_pages(struct page **pages, int nr_pages)
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003447{
Johannes Weiner925b7672012-01-12 17:18:15 -08003448 struct lruvec *lruvec;
Hugh Dickins24513262012-01-20 14:34:21 -08003449 struct zone *zone = NULL;
3450 int pgscanned = 0;
3451 int pgrescued = 0;
3452 int i;
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003453
Hugh Dickins24513262012-01-20 14:34:21 -08003454 for (i = 0; i < nr_pages; i++) {
3455 struct page *page = pages[i];
3456 struct zone *pagezone;
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003457
Hugh Dickins24513262012-01-20 14:34:21 -08003458 pgscanned++;
3459 pagezone = page_zone(page);
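		/*
		 * Batch the zone->lru_lock: keep it held across consecutive
		 * pages that belong to the same zone and only drop/retake it
		 * when the walk crosses into a different zone.
		 */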
3460 if (pagezone != zone) {
3461 if (zone)
3462 spin_unlock_irq(&zone->lru_lock);
3463 zone = pagezone;
3464 spin_lock_irq(&zone->lru_lock);
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003465 }
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003466
Hugh Dickins24513262012-01-20 14:34:21 -08003467 if (!PageLRU(page) || !PageUnevictable(page))
3468 continue;
3469
3470 if (page_evictable(page, NULL)) {
3471 enum lru_list lru = page_lru_base_type(page);
3472
3473 VM_BUG_ON(PageActive(page));
3474 ClearPageUnevictable(page);
3475 __dec_zone_state(zone, NR_UNEVICTABLE);
3476 lruvec = mem_cgroup_lru_move_lists(zone, page,
3477 LRU_UNEVICTABLE, lru);
3478 list_move(&page->lru, &lruvec->lists[lru]);
3479 __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
3480 pgrescued++;
3481 }
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003482 }
Hugh Dickins24513262012-01-20 14:34:21 -08003483
3484 if (zone) {
3485 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
3486 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
3487 spin_unlock_irq(&zone->lru_lock);
3488 }
Hugh Dickins85046572012-01-20 14:34:19 -08003489}
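
/*
 * Caller sketch (simplified, not the exact mm/shmem.c code): SHM_UNLOCK
 * reaches shmem_unlock_mapping(), which walks the mapping in pagevec-sized
 * batches and feeds each batch to check_move_unevictable_pages(), roughly:
 *
 *	pgoff_t index = 0;
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec, 0);
 *	while ((pvec.nr = find_get_pages(mapping, index,
 *					 PAGEVEC_SIZE, pvec.pages))) {
 *		index = pvec.pages[pvec.nr - 1]->index + 1;
 *		check_move_unevictable_pages(pvec.pages, pvec.nr);
 *		pagevec_release(&pvec);
 *		cond_resched();
 *	}
 */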
3490#endif /* CONFIG_SHMEM */
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003491
Johannes Weiner264e56d2011-10-31 17:09:13 -07003492static void warn_scan_unevictable_pages(void)
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003493{
Johannes Weiner264e56d2011-10-31 17:09:13 -07003494 printk_once(KERN_WARNING
KOSAKI Motohiro25bd91b2012-01-10 15:07:40 -08003495 "%s: The scan_unevictable_pages sysctl/node-interface has been "
Johannes Weiner264e56d2011-10-31 17:09:13 -07003496 "disabled for lack of a legitimate use case. If you have "
KOSAKI Motohiro25bd91b2012-01-10 15:07:40 -08003497 "one, please send an email to linux-mm@kvack.org.\n",
3498 current->comm);
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003499}
3500
3501/*
3502 * scan_unevictable_pages [vm] sysctl handler. Formerly an on-demand re-scan
3503 * of all nodes' unevictable lists; now deprecated, it only warns and resets the value
3504 */
3505unsigned long scan_unevictable_pages;
3506
3507int scan_unevictable_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07003508 void __user *buffer,
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003509 size_t *length, loff_t *ppos)
3510{
Johannes Weiner264e56d2011-10-31 17:09:13 -07003511 warn_scan_unevictable_pages();
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07003512 proc_doulongvec_minmax(table, write, buffer, length, ppos);
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003513 scan_unevictable_pages = 0;
3514 return 0;
3515}
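
/*
 * Illustrative effect from userspace (assuming the sysctl is exposed as
 * /proc/sys/vm/scan_unevictable_pages): a write now only produces the
 * deprecation warning and the value is forced back to zero, e.g.
 *
 *	# echo 1 > /proc/sys/vm/scan_unevictable_pages
 *	# cat /proc/sys/vm/scan_unevictable_pages
 *	0
 */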
3516
Thadeu Lima de Souza Cascardoe4455ab2010-10-26 14:21:28 -07003517#ifdef CONFIG_NUMA
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003518/*
3519 * per node 'scan_unevictable_pages' attribute. Formerly an on-demand re-scan
3520 * of a specified node's per zone unevictable lists; now deprecated, reads return 0 and writes only warn.
3521 */
3522
Kay Sievers10fbcf42011-12-21 14:48:43 -08003523static ssize_t read_scan_unevictable_node(struct device *dev,
3524 struct device_attribute *attr,
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003525 char *buf)
3526{
Johannes Weiner264e56d2011-10-31 17:09:13 -07003527 warn_scan_unevictable_pages();
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003528 return sprintf(buf, "0\n"); /* always zero; should fit... */
3529}
3530
Kay Sievers10fbcf42011-12-21 14:48:43 -08003531static ssize_t write_scan_unevictable_node(struct device *dev,
3532 struct device_attribute *attr,
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003533 const char *buf, size_t count)
3534{
Johannes Weiner264e56d2011-10-31 17:09:13 -07003535 warn_scan_unevictable_pages();
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003536 return 1;
3537}
3538
3539
Kay Sievers10fbcf42011-12-21 14:48:43 -08003540static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003541 read_scan_unevictable_node,
3542 write_scan_unevictable_node);
3543
3544int scan_unevictable_register_node(struct node *node)
3545{
Kay Sievers10fbcf42011-12-21 14:48:43 -08003546 return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003547}
3548
3549void scan_unevictable_unregister_node(struct node *node)
3550{
Kay Sievers10fbcf42011-12-21 14:48:43 -08003551 device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003552}
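
/*
 * Illustrative sysfs view (assuming node 0 has been registered via
 * scan_unevictable_register_node()): the attribute shows up as
 *
 *	/sys/devices/system/node/node0/scan_unevictable_pages
 *
 * and, like the sysctl above, reads always report "0" while writes only
 * trigger the deprecation warning.
 */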
Thadeu Lima de Souza Cascardoe4455ab2010-10-26 14:21:28 -07003553#endif