#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		0
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	1
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		2
/* The full zone was compacted */
#define COMPACT_COMPLETE	3

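/*
 * Who compaction is running on behalf of: the direct reclaim path or
 * kswapd. Passed to compact_zone_order() as the compact_mode argument.
 */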
#define COMPACT_MODE_DIRECT_RECLAIM	0
#define COMPACT_MODE_KSWAPD		1

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);

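/*
 * fragmentation_index() returns a value between 0 and 1000: values towards
 * 0 suggest an allocation failure would be due to a shortage of free memory,
 * values towards 1000 suggest it would be due to external fragmentation (the
 * case compaction can address). Direct compaction is skipped when the index
 * is at or below sysctl_extfrag_threshold, as reclaim is then the better
 * tool.
 */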
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *mask,
			bool sync);
extern unsigned long compaction_suitable(struct zone *zone, int order);
extern unsigned long compact_zone_order(struct zone *zone, int order,
					gfp_t gfp_mask, bool sync,
					int compact_mode);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. The next 1 << compact_defer_shift compaction
 * attempts are then skipped, up to a limit of 1 << COMPACT_MAX_DEFER_SHIFT,
 * so repeated failures back off exponentially.
 */
static inline void defer_compaction(struct zone *zone)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}

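/*
 * Sketch of how the deferral helpers pair up in a caller (the real user is
 * the page allocator slow path; the names in the branch below are
 * illustrative, not real kernel symbols):
 *
 *	if (!compaction_deferred(zone)) {
 *		status = compact_zone_order(zone, order, gfp_mask, sync,
 *					    COMPACT_MODE_DIRECT_RECLAIM);
 *		if (allocation_still_fails(zone, order))
 *			defer_compaction(zone);
 *	}
 */
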
#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync)
{
	return COMPACT_CONTINUE;
}

static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
	return COMPACT_SKIPPED;
}

static inline unsigned long compact_zone_order(struct zone *zone, int order,
					       gfp_t gfp_mask, bool sync,
					       int compact_mode)
{
	return COMPACT_CONTINUE;
}

static inline void defer_compaction(struct zone *zone)
{
}

static inline bool compaction_deferred(struct zone *zone)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

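/*
 * With compaction, sysfs and NUMA all enabled, each memory node exposes a
 * sysfs trigger for compacting just that node; these hooks are called when
 * the node device is registered and unregistered.
 */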
#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */