#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <generated/bounds.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator.  */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * The MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks, and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done with the
	 * __free_pageblock_cma() function.  What is important, though,
	 * is that the range of pageblocks must be aligned to
	 * MAX_ORDER_NR_PAGES, should the biggest page be bigger than
	 * a single pageblock.
	 */
	MIGRATE_CMA,
#endif
	MIGRATE_ISOLATE,	/* can't allocate from here */
	MIGRATE_TYPES
};

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define cma_wmark_pages(zone)	zone->min_cma_pages
#else
#  define is_migrate_cma(migratetype) false
#  define cma_wmark_pages(zone) 0
#endif

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};
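
/*
 * Illustrative sketch only (not part of the mainline header): walking every
 * (order, migratetype) buddy free list with for_each_migratetype_order(),
 * here simply counting the blocks queued on the lists.  The helper name is
 * hypothetical.
 */
static inline unsigned long example_count_free_blocks(struct free_area *areas)
{
	struct list_head *pos;
	unsigned long blocks = 0;
	unsigned int order;
	int type;

	for_each_migratetype_order(order, type)
		list_for_each(pos, &areas[order].free_list[type])
			blocks++;

	return blocks;
}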

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_ANON_PAGES,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK,
	/* Second 128 byte cacheline */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_ANON_TRANSPARENT_HUGEPAGES,
	NR_VM_ZONE_STAT_ITEMS };

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

static inline int is_unevictable_lru(enum lru_list lru)
{
	return (lru == LRU_UNEVICTABLE);
}
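
/*
 * Illustrative sketch only (not part of the mainline header): the arithmetic
 * described above lets an LRU index be composed from its "file" and "active"
 * properties.  The helper name is hypothetical.
 */
static inline enum lru_list example_lru_index(int file, int active)
{
	return LRU_BASE + (file ? LRU_FILE : 0) + (active ? LRU_ACTIVE : 0);
}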

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
	struct zone_reclaim_stat reclaim_stat;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	struct zone *zone;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate clean file */
#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)

/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 * 			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	__MAX_NR_ZONES
};

#ifndef __GENERATING_BOUNDS_H

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits. See gfp_zone() in include/linux/gfp.h
 */

#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif

struct zone {
	/* Fields commonly accessed by the page allocator */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

	/*
	 * We don't know whether the memory that we're going to allocate will be
	 * freeable or whether it will eventually be released, so to avoid wasting
	 * several GB of RAM we must reserve some of the lower zone memory
	 * (otherwise we risk running OOM on the lower zones despite there being
	 * tons of freeable RAM in the higher zones). This array is recalculated
	 * at runtime if the sysctl_lowmem_reserve_ratio sysctl changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];

	/*
	 * This is a per-zone reserve of pages that should not be
	 * considered dirtyable memory.
	 */
	unsigned long		dirty_balance_reserve;

#ifdef CONFIG_NUMA
	int node;
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif
	struct per_cpu_pageset __percpu *pageset;
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
	int                     all_unreclaimable; /* All pages pinned */
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif
#ifdef CONFIG_CMA
	/*
	 * CMA needs to increase watermark levels during the allocation
	 * process to make sure that the system is not starved.
	 */
	unsigned long		min_cma_pages;
#endif
	struct free_area	free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct lruvec		lruvec;

	unsigned long		pages_scanned;	   /* since last reclaim */
	unsigned long		flags;		   /* zone flags, see below */

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this zone's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields, and free_area_init_core() in
	 * mm/page_alloc.c performs their initialization.
	 */
	wait_queue_head_t	* wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * The lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	const char		*name;
} ____cacheline_internodealigned_in_smp;
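
/*
 * Illustrative sketch only (not part of the mainline header): the watermark
 * accessors above are typically compared against a free page count together
 * with the lowmem_reserve[] protection, which is roughly the idea behind
 * zone_watermark_ok().  The helper name is hypothetical.
 */
static inline bool example_free_above_low_wmark(const struct zone *z,
						unsigned long free_pages,
						int classzone_idx)
{
	/* Free pages must clear the low watermark plus the class-zone reserve. */
	return free_pages > low_wmark_pages(z) + z->lowmem_reserve[classzone_idx];
}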

typedef enum {
	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
	ZONE_CONGESTED,			/* zone has many dirty pages backed by
					 * a congested BDI
					 */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
	return test_and_set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
	clear_bit(flag, &zone->flags);
}

static inline int zone_is_reclaim_congested(const struct zone *zone)
{
	return test_bit(ZONE_CONGESTED, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
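
/*
 * Illustrative sketch only (not part of the mainline header): a typical
 * test-and-set use of the flag helpers above, mirroring how zone reclaim
 * serializes itself on a zone.  The helper name is hypothetical.
 */
static inline int example_try_lock_zone_reclaim(struct zone *zone)
{
	/* Non-zero if we took the flag, zero if reclaim was already running. */
	return !zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED);
}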

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * The NUMA zonelists are doubled because we need zonelists that restrict the
 * allocations to a single node for GFP_THISNODE.
 *
 * [0]	: Zonelist with fallback
 * [1]	: No fallback (GFP_THISNODE)
 */
#define MAX_ZONELISTS 2


/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_fullzone_zap)
 *    we zero'd fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently evaluate whether that node is
 *    set in the current tasks mems_allowed.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 * indexed by a zones offset in the zonelist zones[] array.
 *
 * The get_page_from_freelist() routine does two scans.  During the
 * first scan, we skip zones whose corresponding bit in 'fullzones'
 * is set or whose corresponding node in current->mems_allowed (which
 * comes from cpusets) is not set.  During the second scan, we bypass
 * this zonelist_cache, to ensure we look methodically at each zone.
 *
 * Once per second, we zero out (zap) fullzones, forcing us to
 * reconsider nodes that might have regained more free memory.
 * The field last_full_zap is the time we last zapped fullzones.
 *
 * This mechanism reduces the amount of time we waste repeatedly
 * reexamining zones for free memory when they came up low on
 * memory only a moment ago.
 *
 * The zonelist_cache struct members logically belong in struct
 * zonelist.  However, the mempolicy zonelists constructed for
 * MPOL_BIND are intentionally variable length (and usually much
 * shorter).  A general purpose mechanism for handling structs with
 * multiple variable length members is more mechanism than we want
 * here.  We resort to some special case hackery instead.
 *
 * The MPOL_BIND zonelists don't need this zonelist_cache (in good
 * part because they are shorter), so we put the fixed length stuff
 * at the front of the zonelist struct, ending in a variable length
 * zones[], as is needed by MPOL_BIND.
 *
 * Then we put the optional zonelist cache on the end of the zonelist
 * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
 * the fixed length portion at the front of the struct.  This pointer
 * both enables us to find the zonelist cache, and in the case of
 * MPOL_BIND zonelists (which will just set the zlcache_ptr to NULL)
 * to know that the zonelist cache is not there.
 *
 * The end result is that struct zonelists come in two flavors:
 *  1) The full, fixed length version, shown below, and
 *  2) The custom zonelists for MPOL_BIND.
 * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
 *
 * Even though there may be multiple CPU cores on a node modifying
 * fullzones or last_full_zap in the same zonelist_cache at the same
 * time, we don't lock it.  This is just hint data - if it is wrong now
 * and then, the allocator will still function, perhaps a bit slower.
 */


struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zonelist_cache *zlcache_ptr;		     // NULL or &zlcache
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;			     // optional ...
#endif
};

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	struct page_cgroup *node_page_cgroup;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->size_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;	/* Protected by lock_memory_hotplug() */
	int kswapd_max_order;
	enum zone_type classzone_idx;
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)

#define node_end_pfn(nid) ({\
	pg_data_t *__pgdat = NODE_DATA(nid);\
	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
})

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(void *data);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);

static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	return lruvec->zone;
#else
	return container_of(lruvec, struct zone, lruvec);
#endif
}

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}
| Nick Piggin | 9328b8f | 2006-01-06 00:11:10 -0800 | [diff] [blame] | 797 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 798 | /** | 
 | 799 |  * is_highmem - helper function to quickly check if a struct zone is a  | 
 | 800 |  *              highmem zone or not.  This is an attempt to keep references | 
 | 801 |  *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. | 
 | 802 |  * @zone - pointer to struct zone variable | 
 | 803 |  */ | 
 | 804 | static inline int is_highmem(struct zone *zone) | 
 | 805 | { | 
| Christoph Lameter | e53ef38 | 2006-09-25 23:31:14 -0700 | [diff] [blame] | 806 | #ifdef CONFIG_HIGHMEM | 
| Harvey Harrison | ddc81ed | 2008-04-28 02:12:07 -0700 | [diff] [blame] | 807 | 	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones; | 
 | 808 | 	return zone_off == ZONE_HIGHMEM * sizeof(*zone) || | 
 | 809 | 	       (zone_off == ZONE_MOVABLE * sizeof(*zone) && | 
 | 810 | 		zone_movable_is_highmem()); | 
| Christoph Lameter | e53ef38 | 2006-09-25 23:31:14 -0700 | [diff] [blame] | 811 | #else | 
 | 812 | 	return 0; | 
 | 813 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 814 | } | 
 | 815 |  | 
 | 816 | static inline int is_normal(struct zone *zone) | 
 | 817 | { | 
 | 818 | 	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL; | 
 | 819 | } | 
 | 820 |  | 
| Nick Piggin | 9328b8f | 2006-01-06 00:11:10 -0800 | [diff] [blame] | 821 | static inline int is_dma32(struct zone *zone) | 
 | 822 | { | 
| Christoph Lameter | fb0e794 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 823 | #ifdef CONFIG_ZONE_DMA32 | 
| Nick Piggin | 9328b8f | 2006-01-06 00:11:10 -0800 | [diff] [blame] | 824 | 	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32; | 
| Christoph Lameter | fb0e794 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 825 | #else | 
 | 826 | 	return 0; | 
 | 827 | #endif | 
| Nick Piggin | 9328b8f | 2006-01-06 00:11:10 -0800 | [diff] [blame] | 828 | } | 
 | 829 |  | 
 | 830 | static inline int is_dma(struct zone *zone) | 
 | 831 | { | 
| Christoph Lameter | 4b51d66 | 2007-02-10 01:43:10 -0800 | [diff] [blame] | 832 | #ifdef CONFIG_ZONE_DMA | 
| Nick Piggin | 9328b8f | 2006-01-06 00:11:10 -0800 | [diff] [blame] | 833 | 	return zone == zone->zone_pgdat->node_zones + ZONE_DMA; | 
| Christoph Lameter | 4b51d66 | 2007-02-10 01:43:10 -0800 | [diff] [blame] | 834 | #else | 
 | 835 | 	return 0; | 
 | 836 | #endif | 
| Nick Piggin | 9328b8f | 2006-01-06 00:11:10 -0800 | [diff] [blame] | 837 | } | 
 | 838 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 839 | /* These two functions are used to setup the per zone pages min values */ | 
 | 840 | struct ctl_table; | 
| Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 841 | int min_free_kbytes_sysctl_handler(struct ctl_table *, int, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 842 | 					void __user *, size_t *, loff_t *); | 
 | 843 | extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; | 
| Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 844 | int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 845 | 					void __user *, size_t *, loff_t *); | 
| Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 846 | int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, | 
| Rohit Seth | 8ad4b1f | 2006-01-08 01:00:40 -0800 | [diff] [blame] | 847 | 					void __user *, size_t *, loff_t *); | 
| Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 848 | int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, | 
| Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 849 | 			void __user *, size_t *, loff_t *); | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 850 | int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, | 
| Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 851 | 			void __user *, size_t *, loff_t *); | 
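
/*
 * Wiring sketch (illustrative only): handlers like the ones above are
 * normally referenced from a ctl_table entry, e.g. in kernel/sysctl.c.
 * The entry below is an assumption for illustration, not a copy of the
 * real table:
 *
 *	static struct ctl_table example_vm_table[] = {
 *		{
 *			.procname	= "min_free_kbytes",
 *			.data		= &min_free_kbytes,
 *			.maxlen		= sizeof(min_free_kbytes),
 *			.mode		= 0644,
 *			.proc_handler	= min_free_kbytes_sysctl_handler,
 *		},
 *		{ }
 *	};
 */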
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 852 |  | 
| KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 853 | extern int numa_zonelist_order_handler(struct ctl_table *, int, | 
| Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 854 | 			void __user *, size_t *, loff_t *); | 
| KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 855 | extern char numa_zonelist_order[]; | 
 | 856 | #define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */ | 
 | 857 |  | 
| Dave Hansen | 93b7504 | 2005-06-23 00:07:47 -0700 | [diff] [blame] | 858 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 859 |  | 
 | 860 | extern struct pglist_data contig_page_data; | 
 | 861 | #define NODE_DATA(nid)		(&contig_page_data) | 
 | 862 | #define NODE_MEM_MAP(nid)	mem_map | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 863 |  | 
| Dave Hansen | 93b7504 | 2005-06-23 00:07:47 -0700 | [diff] [blame] | 864 | #else /* CONFIG_NEED_MULTIPLE_NODES */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 865 |  | 
 | 866 | #include <asm/mmzone.h> | 
 | 867 |  | 
| Dave Hansen | 93b7504 | 2005-06-23 00:07:47 -0700 | [diff] [blame] | 868 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ | 
| Dave Hansen | 348f8b6 | 2005-06-23 00:07:40 -0700 | [diff] [blame] | 869 |  | 
| KAMEZAWA Hiroyuki | 95144c7 | 2006-03-27 01:16:02 -0800 | [diff] [blame] | 870 | extern struct pglist_data *first_online_pgdat(void); | 
 | 871 | extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); | 
 | 872 | extern struct zone *next_zone(struct zone *zone); | 
| KAMEZAWA Hiroyuki | 8357f86 | 2006-03-27 01:15:57 -0800 | [diff] [blame] | 873 |  | 
 | 874 | /** | 
| Fernando Luis Vazquez Cao | 12d15f0 | 2008-05-23 13:05:01 -0700 | [diff] [blame] | 875 |  * for_each_online_pgdat - helper macro to iterate over all online nodes | 
| KAMEZAWA Hiroyuki | 8357f86 | 2006-03-27 01:15:57 -0800 | [diff] [blame] | 876 |  * @pgdat - pointer to a pg_data_t variable | 
 | 877 |  */ | 
 | 878 | #define for_each_online_pgdat(pgdat)			\ | 
 | 879 | 	for (pgdat = first_online_pgdat();		\ | 
 | 880 | 	     pgdat;					\ | 
 | 881 | 	     pgdat = next_online_pgdat(pgdat)) | 
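
/*
 * Usage sketch (illustrative only): the caller just declares the cursor;
 * the macro walks every online node.
 *
 *	pg_data_t *pgdat;
 *
 *	for_each_online_pgdat(pgdat)
 *		pr_info("node %d spans %lu pages\n",
 *			pgdat->node_id, pgdat->node_spanned_pages);
 */
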
| KAMEZAWA Hiroyuki | 8357f86 | 2006-03-27 01:15:57 -0800 | [diff] [blame] | 882 | /** | 
 | 883 |  * for_each_zone - helper macro to iterate over all memory zones | 
 | 884 |  * @zone - pointer to struct zone variable | 
 | 885 |  * | 
 | 886 |  * The user only needs to declare the zone variable; for_each_zone | 
 | 887 |  * fills it in. | 
 | 888 |  */ | 
 | 889 | #define for_each_zone(zone)			        \ | 
 | 890 | 	for (zone = (first_online_pgdat())->node_zones; \ | 
 | 891 | 	     zone;					\ | 
 | 892 | 	     zone = next_zone(zone)) | 
 | 893 |  | 
| KOSAKI Motohiro | ee99c71 | 2009-03-31 15:19:31 -0700 | [diff] [blame] | 894 | #define for_each_populated_zone(zone)		        \ | 
 | 895 | 	for (zone = (first_online_pgdat())->node_zones; \ | 
 | 896 | 	     zone;					\ | 
 | 897 | 	     zone = next_zone(zone))			\ | 
 | 898 | 		if (!populated_zone(zone))		\ | 
 | 899 | 			; /* do nothing */		\ | 
 | 900 | 		else | 
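
/*
 * Usage sketch (illustrative only): both iterators fill in the cursor for
 * the caller; for_each_populated_zone() additionally skips zones with no
 * pages present.
 *
 *	struct zone *zone;
 *
 *	for_each_populated_zone(zone)
 *		pr_info("%s: %lu pages present\n",
 *			zone->name, zone->present_pages);
 */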
 | 901 |  | 
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 902 | static inline struct zone *zonelist_zone(struct zoneref *zoneref) | 
 | 903 | { | 
 | 904 | 	return zoneref->zone; | 
 | 905 | } | 
 | 906 |  | 
 | 907 | static inline int zonelist_zone_idx(struct zoneref *zoneref) | 
 | 908 | { | 
 | 909 | 	return zoneref->zone_idx; | 
 | 910 | } | 
 | 911 |  | 
 | 912 | static inline int zonelist_node_idx(struct zoneref *zoneref) | 
 | 913 | { | 
 | 914 | #ifdef CONFIG_NUMA | 
 | 915 | 	/* zone_to_nid not available in this context */ | 
 | 916 | 	return zoneref->zone->node; | 
 | 917 | #else | 
 | 918 | 	return 0; | 
 | 919 | #endif /* CONFIG_NUMA */ | 
 | 920 | } | 
 | 921 |  | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 922 | /** | 
 | 923 |  * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point | 
 | 924 |  * @z - The cursor used as a starting point for the search | 
 | 925 |  * @highest_zoneidx - The zone index of the highest zone to return | 
 | 926 |  * @nodes - An optional nodemask to filter the zonelist with | 
 | 927 |  * @zone - The first suitable zone found is returned via this parameter | 
 | 928 |  * | 
 | 929 |  * This function returns the next zone at or below a given zone index that is | 
 | 930 |  * within the allowed nodemask using a cursor as the starting point for the | 
| Mel Gorman | 5bead2a | 2008-09-13 02:33:19 -0700 | [diff] [blame] | 931 |  * search. The zoneref returned is a cursor that represents the current zone | 
 | 932 |  * being examined. It should be advanced by one before calling | 
 | 933 |  * next_zones_zonelist again. | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 934 |  */ | 
 | 935 | struct zoneref *next_zones_zonelist(struct zoneref *z, | 
 | 936 | 					enum zone_type highest_zoneidx, | 
 | 937 | 					nodemask_t *nodes, | 
 | 938 | 					struct zone **zone); | 
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 939 |  | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 940 | /** | 
 | 941 |  * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist | 
 | 942 |  * @zonelist - The zonelist to search for a suitable zone | 
 | 943 |  * @highest_zoneidx - The zone index of the highest zone to return | 
 | 944 |  * @nodes - An optional nodemask to filter the zonelist with | 
 | 945 |  * @zone - The first suitable zone found is returned via this parameter | 
 | 946 |  * | 
 | 947 |  * This function returns the first zone at or below a given zone index that is | 
 | 948 |  * within the allowed nodemask. The zoneref returned is a cursor that can be | 
| Mel Gorman | 5bead2a | 2008-09-13 02:33:19 -0700 | [diff] [blame] | 949 |  * used to iterate the zonelist with next_zones_zonelist by advancing it by | 
 | 950 |  * one before calling. | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 951 |  */ | 
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 952 | static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 953 | 					enum zone_type highest_zoneidx, | 
 | 954 | 					nodemask_t *nodes, | 
 | 955 | 					struct zone **zone) | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 956 | { | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 957 | 	return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes, | 
 | 958 | 								zone); | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 959 | } | 
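
/*
 * Usage sketch (illustrative only): open-coded iteration with the cursor
 * pair above, advancing the zoneref by one before each new search as the
 * comments require.  zonelist and highidx are assumed to be supplied by
 * the caller; most code uses the for_each_zone_zonelist*() helpers below
 * instead.
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for (z = first_zones_zonelist(zonelist, highidx, NULL, &zone);
 *	     zone;
 *	     z = next_zones_zonelist(++z, highidx, NULL, &zone))
 *		pr_info("candidate zone %s\n", zone->name);
 */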
 | 960 |  | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 961 | /** | 
 | 962 |  * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask | 
 | 963 |  * @zone - The current zone in the iterator | 
 | 964 |  * @z - The current pointer within zonelist->zones being iterated | 
 | 965 |  * @zlist - The zonelist being iterated | 
 | 966 |  * @highidx - The zone index of the highest zone to return | 
 | 967 |  * @nodemask - Nodemask allowed by the allocator | 
 | 968 |  * | 
 | 969 |  * This iterator iterates through all zones at or below a given zone index and | 
 | 970 |  * within a given nodemask | 
 | 971 |  */ | 
 | 972 | #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ | 
 | 973 | 	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\ | 
 | 974 | 		zone;							\ | 
| Mel Gorman | 5bead2a | 2008-09-13 02:33:19 -0700 | [diff] [blame] | 975 | 		z = next_zones_zonelist(++z, highidx, nodemask, &zone)) | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 976 |  | 
 | 977 | /** | 
 | 978 |  * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index | 
 | 979 |  * @zone - The current zone in the iterator | 
 | 980 |  * @z - The current pointer within zonelist->zones being iterated | 
 | 981 |  * @zlist - The zonelist being iterated | 
 | 982 |  * @highidx - The zone index of the highest zone to return | 
 | 983 |  * | 
 | 984 |  * This iterator iterates through all zones at or below a given zone index. | 
 | 985 |  */ | 
 | 986 | #define for_each_zone_zonelist(zone, z, zlist, highidx) \ | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 987 | 	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL) | 
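
/*
 * Usage sketch (illustrative only, loosely in the style of the page
 * allocator): walk every zone a hypothetical allocation could use, in
 * preference order.  zonelist, high_zoneidx and nodemask are assumed to
 * come from the caller and try_this_zone() is a hypothetical helper.
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 *					high_zoneidx, nodemask) {
 *		if (!populated_zone(zone))
 *			continue;
 *		if (try_this_zone(zone))
 *			break;
 *	}
 */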
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 988 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 989 | #ifdef CONFIG_SPARSEMEM | 
 | 990 | #include <asm/sparsemem.h> | 
 | 991 | #endif | 
 | 992 |  | 
| Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 993 | #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ | 
| Tejun Heo | 0ee332c | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 994 | 	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) | 
| Andrew Morton | b454456 | 2008-04-28 02:12:39 -0700 | [diff] [blame] | 995 | static inline unsigned long early_pfn_to_nid(unsigned long pfn) | 
 | 996 | { | 
 | 997 | 	return 0; | 
 | 998 | } | 
| Andy Whitcroft | b159d43 | 2005-06-23 00:07:52 -0700 | [diff] [blame] | 999 | #endif | 
 | 1000 |  | 
| Andy Whitcroft | 2bdaf11 | 2006-01-06 00:10:53 -0800 | [diff] [blame] | 1001 | #ifdef CONFIG_FLATMEM | 
 | 1002 | #define pfn_to_nid(pfn)		(0) | 
 | 1003 | #endif | 
 | 1004 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1005 | #ifdef CONFIG_SPARSEMEM | 
 | 1006 |  | 
 | 1007 | /* | 
 | 1008 |  * SECTIONS_SHIFT   		#bits space required to store a section # | 
 | 1009 |  * | 
 | 1010 |  * PA_SECTION_SHIFT		physical address to/from section number | 
 | 1011 |  * PFN_SECTION_SHIFT		pfn to/from section number | 
 | 1012 |  */ | 
 | 1013 | #define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) | 
 | 1014 |  | 
 | 1015 | #define PA_SECTION_SHIFT	(SECTION_SIZE_BITS) | 
 | 1016 | #define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT) | 
 | 1017 |  | 
 | 1018 | #define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT) | 
 | 1019 |  | 
 | 1020 | #define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT) | 
 | 1021 | #define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1)) | 
 | 1022 |  | 
| Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 1023 | #define SECTION_BLOCKFLAGS_BITS \ | 
| Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 1024 | 	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS) | 
| Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 1025 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1026 | #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS | 
 | 1027 | #error Allocator MAX_ORDER exceeds SECTION_SIZE | 
 | 1028 | #endif | 
 | 1029 |  | 
| Daniel Kiper | e3c40f3 | 2011-05-24 17:12:33 -0700 | [diff] [blame] | 1030 | #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) | 
 | 1031 | #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) | 
 | 1032 |  | 
| Daniel Kiper | a539f35 | 2011-05-24 17:12:51 -0700 | [diff] [blame] | 1033 | #define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) | 
 | 1034 | #define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK) | 
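
/*
 * Worked example (illustrative only): suppose an architecture defines
 * SECTION_SIZE_BITS as 28 with PAGE_SHIFT == 12, so PFN_SECTION_SHIFT is
 * 16 and each section covers 2^16 pages (256MB).  Then for pfn 0x12345:
 *
 *	pfn_to_section_nr(0x12345)	== 0x1
 *	section_nr_to_pfn(0x1)		== 0x10000
 *	SECTION_ALIGN_DOWN(0x12345)	== 0x10000
 *	SECTION_ALIGN_UP(0x12345)	== 0x20000
 */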
 | 1035 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1036 | struct page; | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 1037 | struct page_cgroup; | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1038 | struct mem_section { | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1039 | 	/* | 
 | 1040 | 	 * This is, logically, a pointer to an array of struct | 
 | 1041 | 	 * pages.  However, it is stored with some other magic. | 
 | 1042 | 	 * (see sparse.c::sparse_init_one_section()) | 
 | 1043 | 	 * | 
| Andy Whitcroft | 30c253e | 2006-06-23 02:03:41 -0700 | [diff] [blame] | 1044 | 	 * Additionally during early boot we encode the node id of | 
 | 1045 | 	 * the section here to guide allocation. | 
 | 1046 | 	 * (see sparse.c::memory_present()) | 
 | 1047 | 	 * | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1048 | 	 * Making it a UL at least makes someone do a cast | 
 | 1049 | 	 * before using it wrong. | 
 | 1050 | 	 */ | 
 | 1051 | 	unsigned long section_mem_map; | 
| Mel Gorman | 5c0e306 | 2007-10-16 01:25:56 -0700 | [diff] [blame] | 1052 |  | 
 | 1053 | 	/* See declaration of similar field in struct zone */ | 
 | 1054 | 	unsigned long *pageblock_flags; | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 1055 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 
 | 1056 | 	/* | 
 | 1057 | 	 * Unlike !SPARSEMEM, where pgdat holds the page_cgroup pointer, with | 
 | 1058 | 	 * SPARSEMEM it lives here in the section. (see memcontrol.h/page_cgroup.h) | 
 | 1059 | 	 */ | 
 | 1060 | 	struct page_cgroup *page_cgroup; | 
 | 1061 | 	unsigned long pad; | 
 | 1062 | #endif | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1063 | }; | 
 | 1064 |  | 
| Bob Picco | 3e34726 | 2005-09-03 15:54:28 -0700 | [diff] [blame] | 1065 | #ifdef CONFIG_SPARSEMEM_EXTREME | 
 | 1066 | #define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section)) | 
| Bob Picco | 802f192 | 2005-09-03 15:54:26 -0700 | [diff] [blame] | 1067 | #else | 
| Bob Picco | 3e34726 | 2005-09-03 15:54:28 -0700 | [diff] [blame] | 1068 | #define SECTIONS_PER_ROOT	1 | 
 | 1069 | #endif | 
| Bob Picco | 802f192 | 2005-09-03 15:54:26 -0700 | [diff] [blame] | 1070 |  | 
| Bob Picco | 3e34726 | 2005-09-03 15:54:28 -0700 | [diff] [blame] | 1071 | #define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT) | 
| Marcelo Roberto Jimenez | 0faa563 | 2010-05-24 14:32:47 -0700 | [diff] [blame] | 1072 | #define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT) | 
| Bob Picco | 3e34726 | 2005-09-03 15:54:28 -0700 | [diff] [blame] | 1073 | #define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1) | 
 | 1074 |  | 
 | 1075 | #ifdef CONFIG_SPARSEMEM_EXTREME | 
 | 1076 | extern struct mem_section *mem_section[NR_SECTION_ROOTS]; | 
 | 1077 | #else | 
 | 1078 | extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; | 
 | 1079 | #endif | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1080 |  | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1081 | static inline struct mem_section *__nr_to_section(unsigned long nr) | 
 | 1082 | { | 
| Bob Picco | 3e34726 | 2005-09-03 15:54:28 -0700 | [diff] [blame] | 1083 | 	if (!mem_section[SECTION_NR_TO_ROOT(nr)]) | 
 | 1084 | 		return NULL; | 
 | 1085 | 	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1086 | } | 
| Dave Hansen | 4ca644d | 2005-10-29 18:16:51 -0700 | [diff] [blame] | 1087 | extern int __section_nr(struct mem_section* ms); | 
| Yasunori Goto | 0475327 | 2008-04-28 02:13:31 -0700 | [diff] [blame] | 1088 | extern unsigned long usemap_size(void); | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1089 |  | 
 | 1090 | /* | 
 | 1091 |  * We use the lower bits of the mem_map pointer to store | 
 | 1092 |  * a little bit of information.  There should be at least | 
 | 1093 |  * 3 bits here due to 32-bit alignment. | 
 | 1094 |  */ | 
 | 1095 | #define	SECTION_MARKED_PRESENT	(1UL<<0) | 
 | 1096 | #define SECTION_HAS_MEM_MAP	(1UL<<1) | 
 | 1097 | #define SECTION_MAP_LAST_BIT	(1UL<<2) | 
 | 1098 | #define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1)) | 
| Andy Whitcroft | 30c253e | 2006-06-23 02:03:41 -0700 | [diff] [blame] | 1099 | #define SECTION_NID_SHIFT	2 | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1100 |  | 
 | 1101 | static inline struct page *__section_mem_map_addr(struct mem_section *section) | 
 | 1102 | { | 
 | 1103 | 	unsigned long map = section->section_mem_map; | 
 | 1104 | 	map &= SECTION_MAP_MASK; | 
 | 1105 | 	return (struct page *)map; | 
 | 1106 | } | 
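
/*
 * Consumption sketch (illustrative only): the encode side lives in
 * sparse.c (see sparse_init_one_section() referenced above), which stores
 * the mem_map pointer biased by the section's first pfn.  The SPARSEMEM
 * pfn_to_page() in asm-generic/memory_model.h can therefore do, roughly:
 *
 *	struct mem_section *ms = __pfn_to_section(pfn);
 *	struct page *page = __section_mem_map_addr(ms) + pfn;
 */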
 | 1107 |  | 
| Andy Whitcroft | 540557b | 2007-10-16 01:24:11 -0700 | [diff] [blame] | 1108 | static inline int present_section(struct mem_section *section) | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1109 | { | 
| Bob Picco | 802f192 | 2005-09-03 15:54:26 -0700 | [diff] [blame] | 1110 | 	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1111 | } | 
 | 1112 |  | 
| Andy Whitcroft | 540557b | 2007-10-16 01:24:11 -0700 | [diff] [blame] | 1113 | static inline int present_section_nr(unsigned long nr) | 
 | 1114 | { | 
 | 1115 | 	return present_section(__nr_to_section(nr)); | 
 | 1116 | } | 
 | 1117 |  | 
 | 1118 | static inline int valid_section(struct mem_section *section) | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1119 | { | 
| Bob Picco | 802f192 | 2005-09-03 15:54:26 -0700 | [diff] [blame] | 1120 | 	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1121 | } | 
 | 1122 |  | 
 | 1123 | static inline int valid_section_nr(unsigned long nr) | 
 | 1124 | { | 
 | 1125 | 	return valid_section(__nr_to_section(nr)); | 
 | 1126 | } | 
 | 1127 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1128 | static inline struct mem_section *__pfn_to_section(unsigned long pfn) | 
 | 1129 | { | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1130 | 	return __nr_to_section(pfn_to_section_nr(pfn)); | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1131 | } | 
 | 1132 |  | 
| Will Deacon | 7b7bf49 | 2011-05-19 13:21:14 +0100 | [diff] [blame] | 1133 | #ifndef CONFIG_HAVE_ARCH_PFN_VALID | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1134 | static inline int pfn_valid(unsigned long pfn) | 
 | 1135 | { | 
 | 1136 | 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) | 
 | 1137 | 		return 0; | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1138 | 	return valid_section(__nr_to_section(pfn_to_section_nr(pfn))); | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1139 | } | 
| Will Deacon | 7b7bf49 | 2011-05-19 13:21:14 +0100 | [diff] [blame] | 1140 | #endif | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1141 |  | 
| Andy Whitcroft | 540557b | 2007-10-16 01:24:11 -0700 | [diff] [blame] | 1142 | static inline int pfn_present(unsigned long pfn) | 
 | 1143 | { | 
 | 1144 | 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) | 
 | 1145 | 		return 0; | 
 | 1146 | 	return present_section(__nr_to_section(pfn_to_section_nr(pfn))); | 
 | 1147 | } | 
 | 1148 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1149 | /* | 
 | 1150 |  * These are _only_ used during initialisation, therefore they | 
 | 1151 |  * can use __initdata ...  They could have names to indicate | 
 | 1152 |  * this restriction. | 
 | 1153 |  */ | 
 | 1154 | #ifdef CONFIG_NUMA | 
| Andy Whitcroft | 161599f | 2006-01-06 00:10:54 -0800 | [diff] [blame] | 1155 | #define pfn_to_nid(pfn)							\ | 
 | 1156 | ({									\ | 
 | 1157 | 	unsigned long __pfn_to_nid_pfn = (pfn);				\ | 
 | 1158 | 	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\ | 
 | 1159 | }) | 
| Andy Whitcroft | 2bdaf11 | 2006-01-06 00:10:53 -0800 | [diff] [blame] | 1160 | #else | 
 | 1161 | #define pfn_to_nid(pfn)		(0) | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1162 | #endif | 
 | 1163 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1164 | #define early_pfn_valid(pfn)	pfn_valid(pfn) | 
 | 1165 | void sparse_init(void); | 
 | 1166 | #else | 
 | 1167 | #define sparse_init()	do {} while (0) | 
| Dave Hansen | 28ae55c | 2005-09-03 15:54:29 -0700 | [diff] [blame] | 1168 | #define sparse_index_init(_sec, _nid)  do {} while (0) | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1169 | #endif /* CONFIG_SPARSEMEM */ | 
 | 1170 |  | 
| Andy Whitcroft | 7516795 | 2006-10-21 10:24:14 -0700 | [diff] [blame] | 1171 | #ifdef CONFIG_NODES_SPAN_OTHER_NODES | 
| KAMEZAWA Hiroyuki | cc2559b | 2009-02-18 14:48:33 -0800 | [diff] [blame] | 1172 | bool early_pfn_in_nid(unsigned long pfn, int nid); | 
| Andy Whitcroft | 7516795 | 2006-10-21 10:24:14 -0700 | [diff] [blame] | 1173 | #else | 
 | 1174 | #define early_pfn_in_nid(pfn, nid)	(1) | 
 | 1175 | #endif | 
 | 1176 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1177 | #ifndef early_pfn_valid | 
 | 1178 | #define early_pfn_valid(pfn)	(1) | 
 | 1179 | #endif | 
 | 1180 |  | 
 | 1181 | void memory_present(int nid, unsigned long start, unsigned long end); | 
 | 1182 | unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); | 
 | 1183 |  | 
| Andy Whitcroft | 14e0729 | 2007-05-06 14:49:14 -0700 | [diff] [blame] | 1184 | /* | 
 | 1185 |  * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we | 
 | 1186 |  * need to check pfn validity within that MAX_ORDER_NR_PAGES block. | 
 | 1187 |  * pfn_valid_within() should be used in this case; we optimise this away | 
 | 1188 |  * when we have no holes within a MAX_ORDER_NR_PAGES block. | 
 | 1189 |  */ | 
 | 1190 | #ifdef CONFIG_HOLES_IN_ZONE | 
 | 1191 | #define pfn_valid_within(pfn) pfn_valid(pfn) | 
 | 1192 | #else | 
 | 1193 | #define pfn_valid_within(pfn) (1) | 
 | 1194 | #endif | 
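
/*
 * Usage sketch (illustrative only): when scanning every pfn of a
 * MAX_ORDER_NR_PAGES block, validate the block once with pfn_valid() and
 * each pfn with pfn_valid_within(), which compiles away unless
 * CONFIG_HOLES_IN_ZONE is set.  start_pfn is assumed to come from the
 * caller and inspect_page() is a hypothetical helper.
 *
 *	unsigned long pfn;
 *
 *	if (!pfn_valid(start_pfn))
 *		return;
 *	for (pfn = start_pfn; pfn < start_pfn + MAX_ORDER_NR_PAGES; pfn++) {
 *		if (!pfn_valid_within(pfn))
 *			continue;
 *		inspect_page(pfn_to_page(pfn));
 *	}
 */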
 | 1195 |  | 
| Mel Gorman | eb33575 | 2009-05-13 17:34:48 +0100 | [diff] [blame] | 1196 | #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL | 
 | 1197 | /* | 
 | 1198 |  * pfn_valid() is meant to be able to tell if a given PFN has valid memmap | 
 | 1199 |  * associated with it or not. In FLATMEM, it is expected that holes always | 
 | 1200 |  * have valid memmap as long as there is valid PFNs either side of the hole. | 
 | 1201 |  * In SPARSEMEM, it is assumed that a valid section has a memmap for the | 
 | 1202 |  * entire section. | 
 | 1203 |  * | 
 | 1204 |  * However, ARM (and maybe other embedded architectures in the future) | 
 | 1205 |  * frees the memmap backing holes to save memory on the assumption the memmap is | 
 | 1206 |  * never used. The page_zone linkages are then broken even though pfn_valid() | 
 | 1207 |  * returns true. A walker of the full memmap must then do this additional | 
 | 1208 |  * check to ensure the memmap they are looking at is sane by making sure | 
 | 1209 |  * the zone and PFN linkages are still valid. This is expensive, but walkers | 
 | 1210 |  * of the full memmap are extremely rare. | 
 | 1211 |  */ | 
 | 1212 | int memmap_valid_within(unsigned long pfn, | 
 | 1213 | 					struct page *page, struct zone *zone); | 
 | 1214 | #else | 
 | 1215 | static inline int memmap_valid_within(unsigned long pfn, | 
 | 1216 | 					struct page *page, struct zone *zone) | 
 | 1217 | { | 
 | 1218 | 	return 1; | 
 | 1219 | } | 
 | 1220 | #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ | 
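
/*
 * Usage sketch (illustrative only): a walker of the full memmap, already
 * looping over the pfns of a zone it holds, would pair pfn_valid() with
 * memmap_valid_within() before trusting the page's zone linkage.
 * inspect_page() is a hypothetical callback.
 *
 *	if (!pfn_valid(pfn))
 *		continue;
 *	page = pfn_to_page(pfn);
 *	if (!memmap_valid_within(pfn, page, zone))
 *		continue;
 *	inspect_page(page);
 */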
 | 1221 |  | 
| Christoph Lameter | 9796547 | 2008-04-28 02:12:54 -0700 | [diff] [blame] | 1222 | #endif /* !__GENERATING_BOUNDS_H */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1223 | #endif /* !__ASSEMBLY__ */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1224 | #endif /* _LINUX_MMZONE_H */ |