/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
        struct page *page, *next;
        unsigned long count = 0;

        list_for_each_entry_safe(page, next, freelist, lru) {
                list_del(&page->lru);
                __free_page(page);
                count++;
        }

        return count;
}

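/*
 * Pages isolated via split_free_page() have not had the normal
 * allocation-time hooks applied; run arch_alloc_page() and (with
 * CONFIG_DEBUG_PAGEALLOC) re-establish the kernel mapping before the
 * pages are handed out as migration targets.
 */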
static void map_pages(struct list_head *list)
{
        struct page *page;

        list_for_each_entry(page, list, lru) {
                arch_alloc_page(page, 0);
                kernel_map_pages(page, 1, 1);
        }
}

static inline bool migrate_async_suitable(int migratetype)
{
        return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

static inline bool should_release_lock(spinlock_t *lock)
{
        return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out in the event
 * of severe contention. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort.
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
                                      bool locked, struct compact_control *cc)
{
        if (should_release_lock(lock)) {
                if (locked) {
                        spin_unlock_irqrestore(lock, *flags);
                        locked = false;
                }

                /* async aborts if taking too long or contended */
                if (!cc->sync) {
                        cc->contended = true;
                        return false;
                }

                cond_resched();
        }

        if (!locked)
                spin_lock_irqsave(lock, *flags);
        return true;
}

static inline bool compact_trylock_irqsave(spinlock_t *lock,
                        unsigned long *flags, struct compact_control *cc)
{
        return compact_checklock_irqsave(lock, flags, false, cc);
}

static void compact_capture_page(struct compact_control *cc)
{
        unsigned long flags;
        int mtype, mtype_low, mtype_high;

        if (!cc->page || *cc->page)
                return;

        /*
         * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
         * regardless of the migratetype of the freelist it is captured from.
         * This is fine because the order for a high-order MIGRATE_MOVABLE
         * allocation is typically at least a pageblock size and overall
         * fragmentation is not impaired. Other allocation types must
         * capture pages from their own migratelist because otherwise they
         * could pollute other pageblocks like MIGRATE_MOVABLE with
         * difficult to move pages and make fragmentation worse overall.
         */
        if (cc->migratetype == MIGRATE_MOVABLE) {
                mtype_low = 0;
                mtype_high = MIGRATE_PCPTYPES;
        } else {
                mtype_low = cc->migratetype;
                mtype_high = cc->migratetype + 1;
        }

        /* Speculatively examine the free lists without zone lock */
        for (mtype = mtype_low; mtype < mtype_high; mtype++) {
                int order;
                for (order = cc->order; order < MAX_ORDER; order++) {
                        struct page *page;
                        struct free_area *area;
                        area = &(cc->zone->free_area[order]);
                        if (list_empty(&area->free_list[mtype]))
                                continue;

                        /* Take the lock and attempt capture of the page */
                        if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
                                return;
                        if (!list_empty(&area->free_list[mtype])) {
                                page = list_entry(area->free_list[mtype].next,
                                                        struct page, lru);
                                if (capture_free_page(page, cc->order, mtype)) {
                                        spin_unlock_irqrestore(&cc->zone->lock,
                                                                        flags);
                                        *cc->page = page;
                                        return;
                                }
                        }
                        spin_unlock_irqrestore(&cc->zone->lock, flags);
                }
        }
}

/*
 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
 * If @strict is true, this aborts and returns 0 on encountering any invalid
 * PFN or non-free page inside the pageblock (even though it may still end
 * up isolating some pages).
 */
static unsigned long isolate_freepages_block(unsigned long blockpfn,
                                unsigned long end_pfn,
                                struct list_head *freelist,
                                bool strict)
{
        int nr_scanned = 0, total_isolated = 0;
        struct page *cursor;

        cursor = pfn_to_page(blockpfn);

        /* Isolate free pages. This assumes the block is valid */
        for (; blockpfn < end_pfn; blockpfn++, cursor++) {
                int isolated, i;
                struct page *page = cursor;

                if (!pfn_valid_within(blockpfn)) {
                        if (strict)
                                return 0;
                        continue;
                }
                nr_scanned++;

                if (!PageBuddy(page)) {
                        if (strict)
                                return 0;
                        continue;
                }

                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
                if (!isolated && strict)
                        return 0;
                total_isolated += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
                        page++;
                }

                /* If a page was split, advance to the end of it */
                if (isolated) {
                        blockpfn += isolated - 1;
                        cursor += isolated - 1;
                }
        }

        trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
        return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the
 * isolated pages (which may be greater than end_pfn if the end fell in
 * the middle of a free page).
 */
unsigned long
isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long isolated, pfn, block_end_pfn, flags;
        struct zone *zone = NULL;
        LIST_HEAD(freelist);

        if (pfn_valid(start_pfn))
                zone = page_zone(pfn_to_page(start_pfn));

        for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
                if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
                        break;

                /*
                 * On subsequent iterations ALIGN() is actually not needed,
                 * but we keep it so as not to complicate the code.
                 */
                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                spin_lock_irqsave(&zone->lock, flags);
                isolated = isolate_freepages_block(pfn, block_end_pfn,
                                                   &freelist, true);
                spin_unlock_irqrestore(&zone->lock, flags);

                /*
                 * In strict mode, isolate_freepages_block() returns 0 if
                 * there are any holes in the block (ie. invalid PFNs or
                 * non-free pages).
                 */
                if (!isolated)
                        break;

                /*
                 * If we managed to isolate pages, it is always (1 << n) *
                 * pageblock_nr_pages for some non-negative n. (Max order
                 * page may span two pageblocks.)
                 */
        }

        /* split_free_page does not map the pages */
        map_pages(&freelist);

        if (pfn < end_pfn) {
                /* Loop terminated early, cleanup. */
                release_freepages(&freelist);
                return 0;
        }

        /* We don't use freelists for anything. */
        return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
        struct page *page;
        unsigned int count[2] = { 0, };

        list_for_each_entry(page, &cc->migratepages, lru)
                count[!!page_is_file_cache(page)]++;

        /* If locked we can use the interrupt unsafe versions */
        if (locked) {
                __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
                __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
        } else {
                mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
                mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
        }
}

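/*
 * The check below throttles isolation: once more pages are isolated
 * than half of the pages remaining on the LRU lists, callers back off
 * so that parallel reclaimers and compactors cannot strand too much
 * memory in transit.
 */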
/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
        unsigned long active, inactive, isolated;

        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
                        zone_page_state(zone, NR_INACTIVE_ANON);
        active = zone_page_state(zone, NR_ACTIVE_FILE) +
                        zone_page_state(zone, NR_ACTIVE_ANON);
        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
                        zone_page_state(zone, NR_ISOLATED_ANON);

        return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to, or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any of cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                           unsigned long low_pfn, unsigned long end_pfn)
{
        unsigned long last_pageblock_nr = 0, pageblock_nr;
        unsigned long nr_scanned = 0, nr_isolated = 0;
        struct list_head *migratelist = &cc->migratepages;
        isolate_mode_t mode = 0;
        unsigned long flags;
        bool locked = false;

        /*
         * Ensure that there are not too many pages isolated from the LRU
         * list by either parallel reclaimers or compaction. If there are,
         * delay for some time until fewer pages are isolated
         */
        while (unlikely(too_many_isolated(zone))) {
                /* async migration should just abort */
                if (!cc->sync)
                        return 0;

                congestion_wait(BLK_RW_ASYNC, HZ/10);

                if (fatal_signal_pending(current))
                        return 0;
        }

        /* Time to isolate some pages for migration */
        cond_resched();
        for (; low_pfn < end_pfn; low_pfn++) {
                struct page *page;

                /* give a chance to irqs before checking need_resched() */
                if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
                        if (should_release_lock(&zone->lru_lock)) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                locked = false;
                        }
                }

                /*
                 * migrate_pfn does not necessarily start aligned to a
                 * pageblock. Ensure that pfn_valid is called when moving
                 * into a new MAX_ORDER_NR_PAGES range in case of large
                 * memory holes within the zone
                 */
                if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
                        if (!pfn_valid(low_pfn)) {
                                low_pfn += MAX_ORDER_NR_PAGES - 1;
                                continue;
                        }
                }

                if (!pfn_valid_within(low_pfn))
                        continue;
                nr_scanned++;

                /*
                 * Get the page and ensure the page is within the same zone.
                 * See the comment in isolate_freepages about overlapping
                 * nodes. It is deliberate that the new zone lock is not taken
                 * as memory compaction should not move pages between nodes.
                 */
                page = pfn_to_page(low_pfn);
                if (page_zone(page) != zone)
                        continue;

                /* Skip if free */
                if (PageBuddy(page))
                        continue;

                /*
                 * For async migration, also only scan in MOVABLE blocks. Async
                 * migration is optimistic to see if the minimum amount of work
                 * satisfies the allocation
                 */
                pageblock_nr = low_pfn >> pageblock_order;
                if (!cc->sync && last_pageblock_nr != pageblock_nr &&
                    !migrate_async_suitable(get_pageblock_migratetype(page))) {
                        goto next_pageblock;
                }

                /* Check may be lockless but that's ok as we recheck later */
                if (!PageLRU(page))
                        continue;

                /*
                 * PageLRU is set. lru_lock normally excludes isolation,
                 * splitting and collapsing (collapsing has already happened
                 * if PageLRU is set) but the lock is not necessarily taken
                 * here and it is wasteful to take it just to check transhuge.
                 * Check TransHuge without lock and skip the whole pageblock if
                 * it's either a transhuge or hugetlbfs page, as calling
                 * compound_order() without preventing THP from splitting the
                 * page underneath us may return surprising results.
                 */
                if (PageTransHuge(page)) {
                        if (!locked)
                                goto next_pageblock;
                        low_pfn += (1 << compound_order(page)) - 1;
                        continue;
                }

                /* Check if it is ok to still hold the lock */
                locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
                                                        locked, cc);
                if (!locked || fatal_signal_pending(current))
                        break;

                /* Recheck PageLRU and PageTransHuge under lock */
                if (!PageLRU(page))
                        continue;
                if (PageTransHuge(page)) {
                        low_pfn += (1 << compound_order(page)) - 1;
                        continue;
                }

                if (!cc->sync)
                        mode |= ISOLATE_ASYNC_MIGRATE;

                /* Try to isolate the page */
                if (__isolate_lru_page(page, mode) != 0)
                        continue;

                VM_BUG_ON(PageTransCompound(page));

                /* Successfully isolated */
                del_page_from_lru_list(zone, page, page_lru(page));
                list_add(&page->lru, migratelist);
                cc->nr_migratepages++;
                nr_isolated++;

                /* Avoid isolating too much */
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
                        ++low_pfn;
                        break;
                }

                continue;

next_pageblock:
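                /*
                 * Leave low_pfn just short of a pageblock boundary so
                 * that the low_pfn++ in the loop header resumes the
                 * scan exactly at the start of a new pageblock.
                 */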
                low_pfn += pageblock_nr_pages;
                low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
                last_pageblock_nr = pageblock_nr;
        }

        acct_isolated(zone, locked, cc);

        if (locked)
                spin_unlock_irqrestore(&zone->lru_lock, flags);

        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

        return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
        int migratetype = get_pageblock_migratetype(page);

        /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
        if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
                return false;

        /* If the page is a large free page, then allow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
                return true;

        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
        if (migrate_async_suitable(migratetype))
                return true;

        /* Otherwise skip the block */
        return false;
}

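/*
 * Compaction uses two scanners that walk towards each other. The
 * migration scanner advances from the start of the zone collecting
 * in-use pages to move, while the free scanner below works backwards
 * from the end of the zone collecting free pages to migrate to. A run
 * finishes when the two scanners meet (see compact_finished()).
 */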
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
                                struct compact_control *cc)
{
        struct page *page;
        unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
        unsigned long flags;
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;

        /*
         * Initialise the free scanner. The starting point is where we last
         * scanned from (or the end of the zone if starting). The low point
         * is the end of the pageblock the migration scanner is using.
         */
        pfn = cc->free_pfn;
        low_pfn = cc->migrate_pfn + pageblock_nr_pages;

        /*
         * Take care that if the migration scanner is at the end of the zone
         * that the free scanner does not accidentally move to the next zone
         * in the next isolation cycle.
         */
        high_pfn = min(low_pfn, pfn);

        zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

        /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
         * and free page scanners meet or enough free pages are isolated.
         */
        for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;

                if (!pfn_valid(pfn))
                        continue;

                /*
                 * Check for overlapping nodes/zones. It's possible on some
                 * configurations to have a setup like
                 * node0 node1 node0
                 * i.e. it's possible that all pages within a zone's range
                 * of pages do not belong to a single zone.
                 */
                page = pfn_to_page(pfn);
                if (page_zone(page) != zone)
                        continue;

                /* Check the block is suitable for migration */
                if (!suitable_migration_target(page))
                        continue;

                /*
                 * Found a block suitable for isolating free pages from.
                 * Once interrupts are disabled, double check that things
                 * are still ok and isolate the pages. This minimises the
                 * time IRQs are disabled.
                 */
                isolated = 0;

                /*
                 * The zone lock must be held to isolate freepages.
                 * Unfortunately this is a very coarse lock and can be
                 * heavily contended if there are parallel allocations
                 * or parallel compactions. For async compaction do not
                 * spin on the lock
                 */
                if (!compact_trylock_irqsave(&zone->lock, &flags, cc))
                        break;
                if (suitable_migration_target(page)) {
                        end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
                        isolated = isolate_freepages_block(pfn, end_pfn,
                                                           freelist, false);
                        nr_freepages += isolated;
                }
                spin_unlock_irqrestore(&zone->lock, flags);

                /*
                 * Record the highest PFN we isolated pages from. When next
                 * looking for free pages, the search will restart here as
                 * page migration may have returned some pages to the allocator
                 */
                if (isolated)
                        high_pfn = max(high_pfn, pfn);
        }

        /* split_free_page does not map the pages */
        map_pages(freelist);

        cc->free_pfn = high_pfn;
        cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
                                        unsigned long data,
                                        int **result)
{
        struct compact_control *cc = (struct compact_control *)data;
        struct page *freepage;

        /* Isolate free pages if necessary */
        if (list_empty(&cc->freepages)) {
                isolate_freepages(cc->zone, cc);

                if (list_empty(&cc->freepages))
                        return NULL;
        }

        freepage = list_entry(cc->freepages.next, struct page, lru);
        list_del(&freepage->lru);
        cc->nr_freepages--;

        return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
        int nr_migratepages = 0;
        int nr_freepages = 0;
        struct page *page;

        list_for_each_entry(page, &cc->migratepages, lru)
                nr_migratepages++;
        list_for_each_entry(page, &cc->freepages, lru)
                nr_freepages++;

        cc->nr_migratepages = nr_migratepages;
        cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
        ISOLATE_ABORT,          /* Abort compaction now */
        ISOLATE_NONE,           /* No pages isolated, continue scanning */
        ISOLATE_SUCCESS,        /* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
                                        struct compact_control *cc)
{
        unsigned long low_pfn, end_pfn;

        /* Do not scan outside zone boundaries */
        low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

        /* Only scan within a pageblock boundary */
        end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

        /* Do not cross the free scanner or scan within a memory hole */
        if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
                cc->migrate_pfn = end_pfn;
                return ISOLATE_NONE;
        }

        /* Perform the isolation */
        low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
        if (!low_pfn || cc->contended)
                return ISOLATE_ABORT;

        cc->migrate_pfn = low_pfn;

        return ISOLATE_SUCCESS;
}

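/*
 * Decide whether a compaction run can stop: COMPACT_COMPLETE once the
 * migrate and free scanners have met, COMPACT_PARTIAL as soon as the
 * allocation that triggered compaction should be able to succeed, and
 * COMPACT_CONTINUE otherwise.
 */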
static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
{
        unsigned long watermark;

        if (fatal_signal_pending(current))
                return COMPACT_PARTIAL;

        /* Compaction run completes if the migrate and free scanner meet */
        if (cc->free_pfn <= cc->migrate_pfn)
                return COMPACT_COMPLETE;

        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
         */
        if (cc->order == -1)
                return COMPACT_CONTINUE;

        /* Compaction run is not finished if the watermark is not met */
        watermark = low_wmark_pages(zone);
        watermark += (1 << cc->order);

        if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
                return COMPACT_CONTINUE;

        /* Direct compactor: Is a suitable page free? */
        if (cc->page) {
                /* Was a suitable page captured? */
                if (*cc->page)
                        return COMPACT_PARTIAL;
        } else {
                unsigned int order;
                for (order = cc->order; order < MAX_ORDER; order++) {
                        struct free_area *area = &zone->free_area[order];
                        /* Job done if page is free of the right migratetype */
                        if (!list_empty(&area->free_list[cc->migratetype]))
                                return COMPACT_PARTIAL;

                        /* Job done if allocation would set block type */
                        if (cc->order >= pageblock_order && area->nr_free)
                                return COMPACT_PARTIAL;
                }
        }

        return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
        int fragindex;
        unsigned long watermark;

        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
         */
        if (order == -1)
                return COMPACT_CONTINUE;

        /*
         * Watermarks for order-0 must be met for compaction. Note the 2UL:
         * during migration, copies of pages need to be allocated, so for a
         * short time the memory footprint is higher.
         */
        watermark = low_wmark_pages(zone) + (2UL << order);
        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                return COMPACT_SKIPPED;

        /*
         * fragmentation index determines if allocation failures are due to
         * low memory or external fragmentation
         *
         * index of -1000 implies allocations might succeed depending on
         * watermarks
         * index towards 0 implies failure is due to lack of memory
         * index towards 1000 implies failure is due to fragmentation
         *
         * Only compact if a failure would be due to fragmentation.
         */
        fragindex = fragmentation_index(zone, order);
        if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
                return COMPACT_SKIPPED;

        if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
            0, 0))
                return COMPACT_PARTIAL;

        return COMPACT_CONTINUE;
}

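/*
 * Core of a single compaction run: repeatedly isolate a cluster of
 * in-use pages and migrate them towards the end of the zone, with
 * compaction_alloc() handing out the targets collected by the free
 * scanner, until compact_finished() signals that the run is done.
 */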
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
        int ret;

        ret = compaction_suitable(zone, cc->order);
        switch (ret) {
        case COMPACT_PARTIAL:
        case COMPACT_SKIPPED:
                /* Compaction is likely to fail */
                return ret;
        case COMPACT_CONTINUE:
                /* Fall through to compaction */
                ;
        }

        /* Setup to move all movable pages to the end of the zone */
        cc->migrate_pfn = zone->zone_start_pfn;
        cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
        cc->free_pfn &= ~(pageblock_nr_pages-1);

        migrate_prep_local();

        while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
                unsigned long nr_migrate, nr_remaining;
                int err;

                switch (isolate_migratepages(zone, cc)) {
                case ISOLATE_ABORT:
                        ret = COMPACT_PARTIAL;
                        putback_lru_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                        goto out;
                case ISOLATE_NONE:
                        continue;
                case ISOLATE_SUCCESS:
                        ;
                }

                nr_migrate = cc->nr_migratepages;
                err = migrate_pages(&cc->migratepages, compaction_alloc,
                                (unsigned long)cc, false,
                                cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;

                count_vm_event(COMPACTBLOCKS);
                count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
                if (nr_remaining)
                        count_vm_events(COMPACTPAGEFAILED, nr_remaining);
                trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
                                                nr_remaining);

                /* Release LRU pages not migrated */
                if (err) {
                        putback_lru_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                        if (err == -ENOMEM) {
                                ret = COMPACT_PARTIAL;
                                goto out;
                        }
                }

                /* Capture a page now if it is a suitable size */
                compact_capture_page(cc);
        }

out:
        /* Release free pages and check accounting */
        cc->nr_freepages -= release_freepages(&cc->freepages);
        VM_BUG_ON(cc->nr_freepages != 0);

        return ret;
}

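/*
 * Set up an on-stack compact_control for one zone, run compact_zone()
 * and report back whether the run aborted due to lock contention.
 */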
static unsigned long compact_zone_order(struct zone *zone,
                                        int order, gfp_t gfp_mask,
                                        bool sync, bool *contended,
                                        struct page **page)
{
        unsigned long ret;
        struct compact_control cc = {
                .nr_freepages = 0,
                .nr_migratepages = 0,
                .order = order,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
                .sync = sync,
                .page = page,
        };
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);

        ret = compact_zone(zone, &cc);

        VM_BUG_ON(!list_empty(&cc.freepages));
        VM_BUG_ON(!list_empty(&cc.migratepages));

        *contended = cc.contended;
        return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 * @contended: Return value that is true if compaction was aborted due to lock contention
 * @page: Optionally capture a free page of the requested order during compaction
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *nodemask,
                        bool sync, bool *contended, struct page **page)
{
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        int may_enter_fs = gfp_mask & __GFP_FS;
        int may_perform_io = gfp_mask & __GFP_IO;
        struct zoneref *z;
        struct zone *zone;
        int rc = COMPACT_SKIPPED;
        int alloc_flags = 0;

        /* Check if the GFP flags allow compaction */
        if (!order || !may_enter_fs || !may_perform_io)
                return rc;

        count_vm_event(COMPACTSTALL);

#ifdef CONFIG_CMA
        if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
                alloc_flags |= ALLOC_CMA;
#endif
        /* Compact each zone in the list */
        for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
                                                                nodemask) {
                int status;

                status = compact_zone_order(zone, order, gfp_mask, sync,
                                                contended, page);
                rc = max(status, rc);

                /* If a normal allocation would succeed, stop compacting */
                if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
                                      alloc_flags))
                        break;
        }

        return rc;
}


/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
        int zoneid;
        struct zone *zone;

        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

                zone = &pgdat->node_zones[zoneid];
                if (!populated_zone(zone))
                        continue;

                cc->nr_freepages = 0;
                cc->nr_migratepages = 0;
                cc->zone = zone;
                INIT_LIST_HEAD(&cc->freepages);
                INIT_LIST_HEAD(&cc->migratepages);

                if (cc->order == -1 || !compaction_deferred(zone, cc->order))
                        compact_zone(zone, cc);

                if (cc->order > 0) {
                        int ok = zone_watermark_ok(zone, cc->order,
                                                low_wmark_pages(zone), 0, 0);
                        if (ok && cc->order >= zone->compact_order_failed)
                                zone->compact_order_failed = cc->order + 1;
                        /* Currently async compaction is never deferred. */
                        else if (!ok && cc->sync)
                                defer_compaction(zone, cc->order);
                }

                VM_BUG_ON(!list_empty(&cc->freepages));
                VM_BUG_ON(!list_empty(&cc->migratepages));
        }

        return 0;
}

int compact_pgdat(pg_data_t *pgdat, int order)
{
        struct compact_control cc = {
                .order = order,
                .sync = false,
                .page = NULL,
        };

        return __compact_pgdat(pgdat, &cc);
}

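/*
 * order == -1 below requests a full compaction of every populated zone
 * on the node, bypassing the deferral and watermark cut-offs that
 * normally end a run early (see compaction_suitable() and
 * compact_finished()).
 */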
static int compact_node(int nid)
{
        struct compact_control cc = {
                .order = -1,
                .sync = true,
                .page = NULL,
        };

        return __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
        int nid;

        /* Flush pending updates to the LRU lists */
        lru_add_drain_all();

        for_each_online_node(nid)
                compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        if (write)
                compact_nodes();

        return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_minmax(table, write, buffer, length, ppos);

        return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
                        struct device_attribute *attr,
                        const char *buf, size_t count)
{
        int nid = dev->id;

        if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
                /* Flush pending updates to the LRU lists */
                lru_add_drain_all();

                compact_node(nid);
        }

        return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
        return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
        return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */