/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out in the event
 * of severe contention. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort.
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	if (need_resched() || spin_is_contended(lock)) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (!cc->sync) {
			if (cc->contended)
				*cc->contended = true;
			return false;
		}

		cond_resched();
		if (fatal_signal_pending(current))
			return false;
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);
	return true;
}

static inline bool compact_trylock_irqsave(spinlock_t *lock,
			unsigned long *flags, struct compact_control *cc)
{
	return compact_checklock_irqsave(lock, flags, false, cc);
}

static void compact_capture_page(struct compact_control *cc)
{
	unsigned long flags;
	int mtype, mtype_low, mtype_high;

	if (!cc->page || *cc->page)
		return;

	/*
	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
	 * regardless of the migratetype of the freelist it is captured from.
	 * This is fine because the order for a high-order MIGRATE_MOVABLE
	 * allocation is typically at least a pageblock size and overall
	 * fragmentation is not impaired. Other allocation types must
	 * capture pages from their own migratelist because otherwise they
	 * could pollute other pageblocks like MIGRATE_MOVABLE with
	 * difficult-to-move pages, making fragmentation worse overall.
	 */
	if (cc->migratetype == MIGRATE_MOVABLE) {
		mtype_low = 0;
		mtype_high = MIGRATE_PCPTYPES;
	} else {
		mtype_low = cc->migratetype;
		mtype_high = cc->migratetype + 1;
	}

	/* Speculatively examine the free lists without zone lock */
	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
		int order;
		for (order = cc->order; order < MAX_ORDER; order++) {
			struct page *page;
			struct free_area *area;
			area = &(cc->zone->free_area[order]);
			if (list_empty(&area->free_list[mtype]))
				continue;

			/* Take the lock and attempt capture of the page */
			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
				return;
			if (!list_empty(&area->free_list[mtype])) {
				page = list_entry(area->free_list[mtype].next,
							struct page, lru);
				if (capture_free_page(page, cc->order, mtype)) {
					spin_unlock_irqrestore(&cc->zone->lock,
								flags);
					*cc->page = page;
					return;
				}
			}
			spin_unlock_irqrestore(&cc->zone->lock, flags);
		}
	}
}

/*
 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
 * If @strict is true, abort and return 0 on any invalid PFNs or non-free
 * pages inside the pageblock (even though it may still end up isolating
 * some pages).
 */
static unsigned long isolate_freepages_block(unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn)) {
			if (strict)
				return 0;
			continue;
		}
		nr_scanned++;

		if (!PageBuddy(page)) {
			if (strict)
				return 0;
			continue;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		if (!isolated && strict)
			return 0;
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn, flags;
	struct zone *zone = NULL;
	LIST_HEAD(freelist);

	if (pfn_valid(start_pfn))
		zone = page_zone(pfn_to_page(start_pfn));

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		spin_lock_irqsave(&zone->lock, flags);
		isolated = isolate_freepages_block(pfn, block_end_pfn,
						   &freelist, true);
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (i.e. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks.)
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to, or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = 0;
	unsigned long flags;
	bool locked;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	spin_lock_irqsave(&zone->lru_lock, flags);
	locked = true;
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;

		/* give a chance to irqs before checking need_resched() */
		if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			locked = false;
		}

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
							locked, cc);
		if (!locked)
			break;

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		/* Skip if free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks. Async
		 * migration is optimistic to see if the minimum amount of work
		 * satisfies the allocation
		 */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
			low_pfn += pageblock_nr_pages;
			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
			last_pageblock_nr = pageblock_nr;
			continue;
		}

		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set, and lru_lock excludes isolation,
		 * splitting and collapsing (collapsing has already
		 * happened if PageLRU is set).
		 */
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (!cc->sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		/* Try to isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		del_page_from_lru_list(zone, page, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(migratetype))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/*
		 * Found a block suitable for isolating free pages from. Once
		 * the zone lock is taken and interrupts are disabled, double
		 * check things are ok and isolate the pages. This is to
		 * minimise the time IRQs are disabled.
		 */
		isolated = 0;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction do not
		 * spin on the lock
		 */
		if (!compact_trylock_irqsave(&zone->lock, &flags, cc))
			break;
		if (suitable_migration_target(page)) {
			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
			isolated = isolate_freepages_block(pfn, end_pfn,
							   freelist, false);
			nr_freepages += isolated;
		}
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated)
			high_pfn = max(high_pfn, pfn);
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
	if (!low_pfn)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn)
		return COMPACT_COMPLETE;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	if (cc->page) {
		/* Was a suitable page captured? */
		if (*cc->page)
			return COMPACT_PARTIAL;
	} else {
		unsigned int order;
		for (order = cc->order; order < MAX_ORDER; order++) {
			struct free_area *area = &zone->free_area[order];

			/* Job done if page is free of the right migratetype */
			if (!list_empty(&area->free_list[cc->migratetype]))
				return COMPACT_PARTIAL;

			/* Job done if allocation would set block type */
			if (order >= pageblock_order && area->nr_free)
				return COMPACT_PARTIAL;
		}
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 * COMPACT_SKIPPED  - If there are too few free pages for compaction
 * COMPACT_PARTIAL  - If the allocation would succeed without compaction
 * COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/* Setup to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;
	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
	cc->free_pfn &= ~(pageblock_nr_pages-1);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, false,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release LRU pages not migrated */
		if (err) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			if (err == -ENOMEM) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}

		/* Capture a page now if it is a suitable size */
		compact_capture_page(cc);
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 bool sync, bool *contended,
				 struct page **page)
{
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
		.contended = contended,
		.page = page,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	return compact_zone(zone, &cc);
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended, struct page **page)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;
	int alloc_flags = 0;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync,
						contended, page);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags))
			break;
	}

	return rc;
}

/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			int ok = zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0);
			if (ok && cc->order >= zone->compact_order_failed)
				zone->compact_order_failed = cc->order + 1;
			/* Currently async compaction is never deferred. */
			else if (!ok && cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}

	return 0;
}

int compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
		.page = NULL,
	};

	return __compact_pgdat(pgdat, &cc);
}

static int compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
		.page = NULL,
	};

	return __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */