| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef _LINUX_MMZONE_H | 
|  | 2 | #define _LINUX_MMZONE_H | 
|  | 3 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4 | #ifndef __ASSEMBLY__ | 
| Christoph Lameter | 9796547 | 2008-04-28 02:12:54 -0700 | [diff] [blame] | 5 | #ifndef __GENERATING_BOUNDS_H | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7 | #include <linux/spinlock.h> | 
|  | 8 | #include <linux/list.h> | 
|  | 9 | #include <linux/wait.h> | 
| David Rientjes | e815af9 | 2007-10-16 23:25:54 -0700 | [diff] [blame] | 10 | #include <linux/bitops.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11 | #include <linux/cache.h> | 
|  | 12 | #include <linux/threads.h> | 
|  | 13 | #include <linux/numa.h> | 
|  | 14 | #include <linux/init.h> | 
| Dave Hansen | bdc8cb9 | 2005-10-29 18:16:53 -0700 | [diff] [blame] | 15 | #include <linux/seqlock.h> | 
| KAMEZAWA Hiroyuki | 8357f86 | 2006-03-27 01:15:57 -0800 | [diff] [blame] | 16 | #include <linux/nodemask.h> | 
| Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 17 | #include <linux/pageblock-flags.h> | 
| Christoph Lameter | 9796547 | 2008-04-28 02:12:54 -0700 | [diff] [blame] | 18 | #include <linux/bounds.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 19 | #include <asm/atomic.h> | 
| Ralf Baechle | 93ff66b | 2006-06-04 02:51:29 -0700 | [diff] [blame] | 20 | #include <asm/page.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 21 |  | 
|  | 22 | /* Free memory management - zoned buddy allocator.  */ | 
|  | 23 | #ifndef CONFIG_FORCE_MAX_ZONEORDER | 
|  | 24 | #define MAX_ORDER 11 | 
|  | 25 | #else | 
|  | 26 | #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER | 
|  | 27 | #endif | 
| Bob Picco | e984bb4 | 2006-05-20 15:00:31 -0700 | [diff] [blame] | 28 | #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 29 |  | 
| Andy Whitcroft | 5ad333e | 2007-07-17 04:03:16 -0700 | [diff] [blame] | 30 | /* | 
|  | 31 | * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed | 
|  | 32 | * costly to service.  It marks the boundary between allocation orders | 
|  | 33 | * that should coalesce naturally under reasonable reclaim pressure and | 
|  | 34 | * those that will not. | 
|  | 35 | */ | 
|  | 36 | #define PAGE_ALLOC_COSTLY_ORDER 3 | 
|  | 37 |  | 
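/*
 * Illustrative sketch (not part of the original header): what the order
 * macros above mean in bytes.  An order-n block is 2^n contiguous pages,
 * the largest buddy block is order MAX_ORDER-1 (1024 pages, 4MiB with a
 * 4KiB PAGE_SIZE and the default MAX_ORDER of 11), and anything above
 * PAGE_ALLOC_COSTLY_ORDER (8 pages) is considered expensive to satisfy.
 * The helper names below are invented for illustration only.
 */
static inline unsigned long example_order_to_bytes(unsigned int order)
{
        return (1UL << order) * PAGE_SIZE;      /* size of one order-n block */
}

static inline int example_order_is_costly(unsigned int order)
{
        return order > PAGE_ALLOC_COSTLY_ORDER;
}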
| Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 38 | #define MIGRATE_UNMOVABLE     0 | 
| Mel Gorman | e12ba74 | 2007-10-16 01:25:52 -0700 | [diff] [blame] | 39 | #define MIGRATE_RECLAIMABLE   1 | 
|  | 40 | #define MIGRATE_MOVABLE       2 | 
| Mel Gorman | 64c5e13 | 2007-10-16 01:25:59 -0700 | [diff] [blame] | 41 | #define MIGRATE_RESERVE       3 | 
| KAMEZAWA Hiroyuki | a5d76b5 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 42 | #define MIGRATE_ISOLATE       4 /* can't allocate from here */ | 
|  | 43 | #define MIGRATE_TYPES         5 | 
| Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 44 |  | 
|  | 45 | #define for_each_migratetype_order(order, type) \ | 
|  | 46 | for (order = 0; order < MAX_ORDER; order++) \ | 
|  | 47 | for (type = 0; type < MIGRATE_TYPES; type++) | 
|  | 48 |  | 
| Mel Gorman | 467c996 | 2007-10-16 01:26:02 -0700 | [diff] [blame] | 49 | extern int page_group_by_mobility_disabled; | 
|  | 50 |  | 
|  | 51 | static inline int get_pageblock_migratetype(struct page *page) | 
|  | 52 | { | 
|  | 53 | if (unlikely(page_group_by_mobility_disabled)) | 
|  | 54 | return MIGRATE_UNMOVABLE; | 
|  | 55 |  | 
|  | 56 | return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end); | 
|  | 57 | } | 
|  | 58 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 59 | struct free_area { | 
| Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 60 | struct list_head	free_list[MIGRATE_TYPES]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 61 | unsigned long		nr_free; | 
|  | 62 | }; | 
|  | 63 |  | 
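/*
 * Illustrative sketch (not part of the original header): walking the buddy
 * free lists declared above.  nr_free counts blocks of every migrate type
 * at one order, so the free page total is the sum of nr_free << order;
 * counting blocks per migrate type means walking each free_list[] with the
 * for_each_migratetype_order() iterator.  Helper names are invented here.
 */
static inline unsigned long example_nr_free_pages(struct free_area *area)
{
        unsigned long pages = 0;
        unsigned int order;

        for (order = 0; order < MAX_ORDER; order++)
                pages += area[order].nr_free << order;
        return pages;
}

static inline unsigned long example_nr_free_blocks(struct free_area *area)
{
        unsigned long blocks = 0;
        unsigned int order, type;
        struct list_head *pos;

        for_each_migratetype_order(order, type)
                list_for_each(pos, &area[order].free_list[type])
                        blocks++;
        return blocks;
}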
|  | 64 | struct pglist_data; | 
|  | 65 |  | 
|  | 66 | /* | 
|  | 67 | * zone->lock and zone->lru_lock are two of the hottest locks in the kernel. | 
|  | 68 | * So add a wild amount of padding here to ensure that they fall into separate | 
|  | 69 | * cachelines.  There are very few zone structures in the machine, so space | 
|  | 70 | * consumption is not a concern here. | 
|  | 71 | */ | 
|  | 72 | #if defined(CONFIG_SMP) | 
|  | 73 | struct zone_padding { | 
|  | 74 | char x[0]; | 
| Ravikiran G Thirumalai | 22fc6ec | 2006-01-08 01:01:27 -0800 | [diff] [blame] | 75 | } ____cacheline_internodealigned_in_smp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 76 | #define ZONE_PADDING(name)	struct zone_padding name; | 
|  | 77 | #else | 
|  | 78 | #define ZONE_PADDING(name) | 
|  | 79 | #endif | 
|  | 80 |  | 
| Christoph Lameter | 2244b95 | 2006-06-30 01:55:33 -0700 | [diff] [blame] | 81 | enum zone_stat_item { | 
| Christoph Lameter | 51ed449 | 2007-02-10 01:43:02 -0800 | [diff] [blame] | 82 | /* First 128 byte cacheline (assuming 64 bit words) */ | 
| Christoph Lameter | d23ad42 | 2007-02-10 01:43:02 -0800 | [diff] [blame] | 83 | NR_FREE_PAGES, | 
| Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 84 | NR_LRU_BASE, | 
| Rik van Riel | 4f98a2f | 2008-10-18 20:26:32 -0700 | [diff] [blame] | 85 | NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ | 
|  | 86 | NR_ACTIVE_ANON,		/*  "     "     "   "       "         */ | 
|  | 87 | NR_INACTIVE_FILE,	/*  "     "     "   "       "         */ | 
|  | 88 | NR_ACTIVE_FILE,		/*  "     "     "   "       "         */ | 
| Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 89 | #ifdef CONFIG_UNEVICTABLE_LRU | 
|  | 90 | NR_UNEVICTABLE,		/*  "     "     "   "       "         */ | 
| Nick Piggin | 5344b7e | 2008-10-18 20:26:51 -0700 | [diff] [blame] | 91 | NR_MLOCK,		/* mlock()ed pages found and moved off LRU */ | 
| Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 92 | #else | 
|  | 93 | NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */ | 
| Nick Piggin | 5344b7e | 2008-10-18 20:26:51 -0700 | [diff] [blame] | 94 | NR_MLOCK = NR_ACTIVE_FILE, | 
| Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 95 | #endif | 
| Christoph Lameter | f3dbd34 | 2006-06-30 01:55:36 -0700 | [diff] [blame] | 96 | NR_ANON_PAGES,	/* Mapped anonymous pages */ | 
|  | 97 | NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables. | 
| Christoph Lameter | 65ba55f | 2006-06-30 01:55:34 -0700 | [diff] [blame] | 98 | only modified from process context */ | 
| Christoph Lameter | 347ce43 | 2006-06-30 01:55:35 -0700 | [diff] [blame] | 99 | NR_FILE_PAGES, | 
| Christoph Lameter | b1e7a8f | 2006-06-30 01:55:39 -0700 | [diff] [blame] | 100 | NR_FILE_DIRTY, | 
| Christoph Lameter | ce866b3 | 2006-06-30 01:55:40 -0700 | [diff] [blame] | 101 | NR_WRITEBACK, | 
| Christoph Lameter | 51ed449 | 2007-02-10 01:43:02 -0800 | [diff] [blame] | 102 | NR_SLAB_RECLAIMABLE, | 
|  | 103 | NR_SLAB_UNRECLAIMABLE, | 
|  | 104 | NR_PAGETABLE,		/* used for pagetables */ | 
| Christoph Lameter | fd39fc8 | 2006-06-30 01:55:40 -0700 | [diff] [blame] | 105 | NR_UNSTABLE_NFS,	/* NFS unstable pages */ | 
| Christoph Lameter | d2c5e30 | 2006-06-30 01:55:41 -0700 | [diff] [blame] | 106 | NR_BOUNCE, | 
| Andrew Morton | e129b5c | 2006-09-27 01:50:00 -0700 | [diff] [blame] | 107 | NR_VMSCAN_WRITE, | 
| Rik van Riel | 4f98a2f | 2008-10-18 20:26:32 -0700 | [diff] [blame] | 108 | /* Second 128 byte cacheline */ | 
| Miklos Szeredi | fc3ba69 | 2008-04-30 00:54:38 -0700 | [diff] [blame] | 109 | NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */ | 
| Christoph Lameter | ca889e6 | 2006-06-30 01:55:44 -0700 | [diff] [blame] | 110 | #ifdef CONFIG_NUMA | 
|  | 111 | NUMA_HIT,		/* allocated in intended node */ | 
|  | 112 | NUMA_MISS,		/* allocated in non intended node */ | 
|  | 113 | NUMA_FOREIGN,		/* was intended here, hit elsewhere */ | 
|  | 114 | NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */ | 
|  | 115 | NUMA_LOCAL,		/* allocation from local node */ | 
|  | 116 | NUMA_OTHER,		/* allocation from other node */ | 
|  | 117 | #endif | 
| Christoph Lameter | 2244b95 | 2006-06-30 01:55:33 -0700 | [diff] [blame] | 118 | NR_VM_ZONE_STAT_ITEMS }; | 
|  | 119 |  | 
| Rik van Riel | 4f98a2f | 2008-10-18 20:26:32 -0700 | [diff] [blame] | 120 | /* | 
|  | 121 | * We do arithmetic on the LRU lists in various places in the code, | 
|  | 122 | * so it is important to keep the active lists LRU_ACTIVE higher in | 
|  | 123 | * the array than the corresponding inactive lists, and to keep | 
|  | 124 | * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists. | 
|  | 125 | * | 
|  | 126 | * This has to be kept in sync with the statistics in zone_stat_item | 
|  | 127 | * above and the descriptions in vmstat_text in mm/vmstat.c | 
|  | 128 | */ | 
|  | 129 | #define LRU_BASE 0 | 
|  | 130 | #define LRU_ACTIVE 1 | 
|  | 131 | #define LRU_FILE 2 | 
|  | 132 |  | 
| Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 133 | enum lru_list { | 
| Rik van Riel | 4f98a2f | 2008-10-18 20:26:32 -0700 | [diff] [blame] | 134 | LRU_INACTIVE_ANON = LRU_BASE, | 
|  | 135 | LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, | 
|  | 136 | LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, | 
|  | 137 | LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, | 
| Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 138 | #ifdef CONFIG_UNEVICTABLE_LRU | 
|  | 139 | LRU_UNEVICTABLE, | 
|  | 140 | #else | 
|  | 141 | LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */ | 
|  | 142 | #endif | 
|  | 143 | NR_LRU_LISTS | 
|  | 144 | }; | 
| Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 145 |  | 
|  | 146 | #define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++) | 
|  | 147 |  | 
| Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 148 | #define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++) | 
|  | 149 |  | 
| Rik van Riel | 4f98a2f | 2008-10-18 20:26:32 -0700 | [diff] [blame] | 150 | static inline int is_file_lru(enum lru_list l) | 
|  | 151 | { | 
|  | 152 | return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE); | 
|  | 153 | } | 
|  | 154 |  | 
| Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 155 | static inline int is_active_lru(enum lru_list l) | 
|  | 156 | { | 
| Rik van Riel | 4f98a2f | 2008-10-18 20:26:32 -0700 | [diff] [blame] | 157 | return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE); | 
| Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 158 | } | 
|  | 159 |  | 
| Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 160 | static inline int is_unevictable_lru(enum lru_list l) | 
|  | 161 | { | 
|  | 162 | #ifdef CONFIG_UNEVICTABLE_LRU | 
|  | 163 | return (l == LRU_UNEVICTABLE); | 
|  | 164 | #else | 
|  | 165 | return 0; | 
|  | 166 | #endif | 
|  | 167 | } | 
|  | 168 |  | 
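/*
 * Illustrative sketch (not part of the original header): the LRU index
 * arithmetic described in the comment above the LRU_BASE defines.  An LRU
 * list index is LRU_BASE plus LRU_ACTIVE and/or LRU_FILE, and because
 * NR_INACTIVE_ANON == NR_LRU_BASE the matching vmstat counter is simply
 * NR_LRU_BASE + lru.  The helper names are invented here; the kernel
 * derives the index from page flags in its own inline helpers.
 */
static inline enum lru_list example_build_lru(int active, int file)
{
        enum lru_list lru = LRU_BASE;

        if (active)
                lru += LRU_ACTIVE;      /* e.g. active file -> LRU_ACTIVE_FILE */
        if (file)
                lru += LRU_FILE;
        return lru;
}

static inline enum zone_stat_item example_lru_to_stat(enum lru_list lru)
{
        return NR_LRU_BASE + lru;       /* NR_INACTIVE_ANON .. NR_ACTIVE_FILE */
}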
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 169 | struct per_cpu_pages { | 
|  | 170 | int count;		/* number of pages in the list */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 171 | int high;		/* high watermark, emptying needed */ | 
|  | 172 | int batch;		/* chunk size for buddy add/remove */ | 
|  | 173 | struct list_head list;	/* the list of pages */ | 
|  | 174 | }; | 
|  | 175 |  | 
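/*
 * Illustrative sketch (not part of the original header): how the three
 * fields above interact.  Freed order-0 pages are cached on this per-CPU
 * list; once count reaches high the list is trimmed back to the buddy
 * allocator batch pages at a time.  The helper name is invented here.
 */
static inline int example_pcp_needs_drain(struct per_cpu_pages *pcp)
{
        return pcp->count >= pcp->high;
}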
|  | 176 | struct per_cpu_pageset { | 
| Christoph Lameter | 3dfa572 | 2008-02-04 22:29:19 -0800 | [diff] [blame] | 177 | struct per_cpu_pages pcp; | 
| Christoph Lameter | 4037d45 | 2007-05-09 02:35:14 -0700 | [diff] [blame] | 178 | #ifdef CONFIG_NUMA | 
|  | 179 | s8 expire; | 
|  | 180 | #endif | 
| Christoph Lameter | 2244b95 | 2006-06-30 01:55:33 -0700 | [diff] [blame] | 181 | #ifdef CONFIG_SMP | 
| Christoph Lameter | df9ecab | 2006-08-31 21:27:35 -0700 | [diff] [blame] | 182 | s8 stat_threshold; | 
| Christoph Lameter | 2244b95 | 2006-06-30 01:55:33 -0700 | [diff] [blame] | 183 | s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; | 
|  | 184 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 185 | } ____cacheline_aligned_in_smp; | 
|  | 186 |  | 
| Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 187 | #ifdef CONFIG_NUMA | 
|  | 188 | #define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)]) | 
|  | 189 | #else | 
|  | 190 | #define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)]) | 
|  | 191 | #endif | 
|  | 192 |  | 
| Christoph Lameter | 9796547 | 2008-04-28 02:12:54 -0700 | [diff] [blame] | 193 | #endif /* !__GENERATING_BOUNDS_H */ | 
|  | 194 |  | 
| Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 195 | enum zone_type { | 
| Christoph Lameter | 4b51d66 | 2007-02-10 01:43:10 -0800 | [diff] [blame] | 196 | #ifdef CONFIG_ZONE_DMA | 
| Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 197 | /* | 
|  | 198 | * ZONE_DMA is used when there are devices that are not able | 
|  | 199 | * to do DMA to all of addressable memory (ZONE_NORMAL). Then we | 
|  | 200 | * carve out the portion of memory that is needed for these devices. | 
|  | 201 | * The range is arch specific. | 
|  | 202 | * | 
|  | 203 | * Some examples | 
|  | 204 | * | 
|  | 205 | * Architecture		Limit | 
|  | 206 | * --------------------------- | 
|  | 207 | * parisc, ia64, sparc	<4G | 
|  | 208 | * s390			<2G | 
| Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 209 | * arm			Various | 
|  | 210 | * alpha		Unlimited or 0-16MB. | 
|  | 211 | * | 
|  | 212 | * i386, x86_64 and multiple other arches | 
|  | 213 | * 			<16M. | 
|  | 214 | */ | 
|  | 215 | ZONE_DMA, | 
| Christoph Lameter | 4b51d66 | 2007-02-10 01:43:10 -0800 | [diff] [blame] | 216 | #endif | 
| Christoph Lameter | fb0e794 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 217 | #ifdef CONFIG_ZONE_DMA32 | 
| Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 218 | /* | 
|  | 219 | * x86_64 needs two ZONE_DMAs because it supports devices that are | 
|  | 220 | * only able to do DMA to the lower 16M but also 32 bit devices that | 
|  | 221 | * can only do DMA areas below 4G. | 
|  | 222 | */ | 
|  | 223 | ZONE_DMA32, | 
| Christoph Lameter | fb0e794 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 224 | #endif | 
| Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 225 | /* | 
|  | 226 | * Normal addressable memory is in ZONE_NORMAL. DMA operations can be | 
|  | 227 | * performed on pages in ZONE_NORMAL if the DMA devices support | 
|  | 228 | * transfers to all addressable memory. | 
|  | 229 | */ | 
|  | 230 | ZONE_NORMAL, | 
| Christoph Lameter | e53ef38 | 2006-09-25 23:31:14 -0700 | [diff] [blame] | 231 | #ifdef CONFIG_HIGHMEM | 
| Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 232 | /* | 
|  | 233 | * A memory area that is only addressable by the kernel through | 
|  | 234 | * mapping portions into its own address space. This is for example | 
|  | 235 | * used by i386 to allow the kernel to address the memory beyond | 
|  | 236 | * 900MB. The kernel will set up special mappings (page | 
|  | 237 | * table entries on i386) for each page that the kernel needs to | 
|  | 238 | * access. | 
|  | 239 | */ | 
|  | 240 | ZONE_HIGHMEM, | 
| Christoph Lameter | e53ef38 | 2006-09-25 23:31:14 -0700 | [diff] [blame] | 241 | #endif | 
| Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 242 | ZONE_MOVABLE, | 
| Christoph Lameter | 9796547 | 2008-04-28 02:12:54 -0700 | [diff] [blame] | 243 | __MAX_NR_ZONES | 
| Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 244 | }; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 245 |  | 
| Christoph Lameter | 9796547 | 2008-04-28 02:12:54 -0700 | [diff] [blame] | 246 | #ifndef __GENERATING_BOUNDS_H | 
|  | 247 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 248 | /* | 
|  | 249 | * When a memory allocation must conform to specific limitations (such | 
|  | 250 | * as being suitable for DMA) the caller will pass in hints to the | 
|  | 251 | * allocator in the gfp_mask, in the zone modifier bits.  These bits | 
|  | 252 | * are used to select a priority ordered list of memory zones which | 
| Christoph Lameter | 19655d3 | 2006-09-25 23:31:19 -0700 | [diff] [blame] | 253 | * match the requested limits. See gfp_zone() in include/linux/gfp.h | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 254 | */ | 
| Christoph Lameter | fb0e794 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 255 |  | 
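/*
 * Illustrative sketch (not part of the original header): roughly what the
 * zone modifier bits select.  The real mapping is gfp_zone() in
 * <linux/gfp.h>; the __GFP_* flags used here come from that header (which
 * this file does not include) and ZONE_MOVABLE handling is ignored.
 */
static inline enum zone_type example_gfp_zone(unsigned int gfp_flags)
{
#ifdef CONFIG_ZONE_DMA
        if (gfp_flags & __GFP_DMA)
                return ZONE_DMA;
#endif
#ifdef CONFIG_ZONE_DMA32
        if (gfp_flags & __GFP_DMA32)
                return ZONE_DMA32;
#endif
#ifdef CONFIG_HIGHMEM
        if (gfp_flags & __GFP_HIGHMEM)
                return ZONE_HIGHMEM;
#endif
        return ZONE_NORMAL;
}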
| Christoph Lameter | 9796547 | 2008-04-28 02:12:54 -0700 | [diff] [blame] | 256 | #if MAX_NR_ZONES < 2 | 
| Christoph Lameter | 4b51d66 | 2007-02-10 01:43:10 -0800 | [diff] [blame] | 257 | #define ZONES_SHIFT 0 | 
| Christoph Lameter | 9796547 | 2008-04-28 02:12:54 -0700 | [diff] [blame] | 258 | #elif MAX_NR_ZONES <= 2 | 
| Christoph Lameter | 19655d3 | 2006-09-25 23:31:19 -0700 | [diff] [blame] | 259 | #define ZONES_SHIFT 1 | 
| Christoph Lameter | 9796547 | 2008-04-28 02:12:54 -0700 | [diff] [blame] | 260 | #elif MAX_NR_ZONES <= 4 | 
| Christoph Lameter | 19655d3 | 2006-09-25 23:31:19 -0700 | [diff] [blame] | 261 | #define ZONES_SHIFT 2 | 
| Christoph Lameter | 4b51d66 | 2007-02-10 01:43:10 -0800 | [diff] [blame] | 262 | #else | 
|  | 263 | #error ZONES_SHIFT -- too many zones configured, adjust calculation | 
| Christoph Lameter | fb0e794 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 264 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 265 |  | 
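/*
 * Worked example for the selection above (illustrative, not part of the
 * original header): a 32-bit configuration with CONFIG_ZONE_DMA and
 * CONFIG_HIGHMEM enabled has ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM and
 * ZONE_MOVABLE, so MAX_NR_ZONES is 4 and ZONES_SHIFT is 2 -- two bits are
 * enough to encode the zone number in page->flags.
 */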
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 266 | struct zone { | 
|  | 267 | /* Fields commonly accessed by the page allocator */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 268 | unsigned long		pages_min, pages_low, pages_high; | 
|  | 269 | /* | 
|  | 270 | * We don't know if the memory that we're going to allocate will be freeable | 
|  | 271 | * and/or released eventually, so to avoid totally wasting several | 
|  | 272 | * GB of ram we must reserve some of the lower zone memory (otherwise we risk | 
|  | 273 | * running OOM on the lower zones despite there being tons of freeable ram | 
|  | 274 | * on the higher zones). This array is recalculated at runtime if the | 
|  | 275 | * sysctl_lowmem_reserve_ratio sysctl changes. | 
|  | 276 | */ | 
|  | 277 | unsigned long		lowmem_reserve[MAX_NR_ZONES]; | 
|  | 278 |  | 
| Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 279 | #ifdef CONFIG_NUMA | 
| Christoph Lameter | d5f541e | 2006-09-27 01:50:08 -0700 | [diff] [blame] | 280 | int node; | 
| Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 281 | /* | 
|  | 282 | * zone reclaim becomes active if more unmapped pages exist. | 
|  | 283 | */ | 
| Christoph Lameter | 8417bba | 2006-09-25 23:31:51 -0700 | [diff] [blame] | 284 | unsigned long		min_unmapped_pages; | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 285 | unsigned long		min_slab_pages; | 
| Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 286 | struct per_cpu_pageset	*pageset[NR_CPUS]; | 
|  | 287 | #else | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 288 | struct per_cpu_pageset	pageset[NR_CPUS]; | 
| Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 289 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 290 | /* | 
|  | 291 | * free areas of different sizes | 
|  | 292 | */ | 
|  | 293 | spinlock_t		lock; | 
| Dave Hansen | bdc8cb9 | 2005-10-29 18:16:53 -0700 | [diff] [blame] | 294 | #ifdef CONFIG_MEMORY_HOTPLUG | 
|  | 295 | /* see spanned/present_pages for more description */ | 
|  | 296 | seqlock_t		span_seqlock; | 
|  | 297 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 298 | struct free_area	free_area[MAX_ORDER]; | 
|  | 299 |  | 
| Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 300 | #ifndef CONFIG_SPARSEMEM | 
|  | 301 | /* | 
| Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 302 | * Flags for a pageblock_nr_pages block. See pageblock-flags.h. | 
| Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 303 | * In SPARSEMEM, this map is stored in struct mem_section | 
|  | 304 | */ | 
|  | 305 | unsigned long		*pageblock_flags; | 
|  | 306 | #endif /* CONFIG_SPARSEMEM */ | 
|  | 307 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 308 |  | 
|  | 309 | ZONE_PADDING(_pad1_) | 
|  | 310 |  | 
|  | 311 | /* Fields commonly accessed by the page reclaim scanner */ | 
|  | 312 | spinlock_t		lru_lock; | 
| Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 313 | struct { | 
|  | 314 | struct list_head list; | 
|  | 315 | unsigned long nr_scan; | 
|  | 316 | } lru[NR_LRU_LISTS]; | 
| Rik van Riel | 4f98a2f | 2008-10-18 20:26:32 -0700 | [diff] [blame] | 317 |  | 
|  | 318 | /* | 
|  | 319 | * The pageout code in vmscan.c keeps track of how many of the | 
|  | 320 | * mem/swap backed and file backed pages are referenced. | 
|  | 321 | * The higher the rotated/scanned ratio, the more valuable | 
|  | 322 | * that cache is. | 
|  | 323 | * | 
|  | 324 | * The anon LRU stats live in [0], file LRU stats in [1] | 
|  | 325 | */ | 
|  | 326 | unsigned long		recent_rotated[2]; | 
|  | 327 | unsigned long		recent_scanned[2]; | 
|  | 328 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 329 | unsigned long		pages_scanned;	   /* since last reclaim */ | 
| David Rientjes | e815af9 | 2007-10-16 23:25:54 -0700 | [diff] [blame] | 330 | unsigned long		flags;		   /* zone flags, see below */ | 
| Martin Hicks | 753ee72 | 2005-06-21 17:14:41 -0700 | [diff] [blame] | 331 |  | 
| Christoph Lameter | 2244b95 | 2006-06-30 01:55:33 -0700 | [diff] [blame] | 332 | /* Zone statistics */ | 
|  | 333 | atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS]; | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 334 |  | 
|  | 335 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 336 | * prev_priority holds the scanning priority for this zone.  It is | 
|  | 337 | * defined as the scanning priority at which we achieved our reclaim | 
|  | 338 | * target at the previous try_to_free_pages() or balance_pgdat() | 
|  | 339 | * invocation. | 
|  | 340 | * | 
|  | 341 | * We use prev_priority as a measure of how much stress page reclaim is | 
|  | 342 | * under - it drives the swappiness decision: whether to unmap mapped | 
|  | 343 | * pages. | 
|  | 344 | * | 
| Martin Bligh | 3bb1a85 | 2006-10-28 10:38:24 -0700 | [diff] [blame] | 345 | * Access to this field is quite racy even on uniprocessor.  But | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 346 | * it is expected to average out OK. | 
|  | 347 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 348 | int prev_priority; | 
|  | 349 |  | 
| Rik van Riel | 556adec | 2008-10-18 20:26:34 -0700 | [diff] [blame] | 350 | /* | 
|  | 351 | * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on | 
|  | 352 | * this zone's LRU.  Maintained by the pageout code. | 
|  | 353 | */ | 
|  | 354 | unsigned int inactive_ratio; | 
|  | 355 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 356 |  | 
|  | 357 | ZONE_PADDING(_pad2_) | 
|  | 358 | /* Rarely used or read-mostly fields */ | 
|  | 359 |  | 
|  | 360 | /* | 
|  | 361 | * wait_table		-- the array holding the hash table | 
| Yasunori Goto | 02b694d | 2006-06-23 02:03:08 -0700 | [diff] [blame] | 362 | * wait_table_hash_nr_entries	-- the size of the hash table array | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 363 | * wait_table_bits	-- wait_table_size == (1 << wait_table_bits) | 
|  | 364 | * | 
|  | 365 | * The purpose of all these is to keep track of the people | 
|  | 366 | * waiting for a page to become available and make them | 
|  | 367 | * runnable again when possible. The trouble is that this | 
|  | 368 | * consumes a lot of space, especially when so few things | 
|  | 369 | * wait on pages at a given time. So instead of using | 
|  | 370 | * per-page waitqueues, we use a waitqueue hash table. | 
|  | 371 | * | 
|  | 372 | * The bucket discipline is to sleep on the same queue when | 
|  | 373 | * colliding and wake all in that wait queue when removing. | 
|  | 374 | * When something wakes, it must check to be sure its page is | 
|  | 375 | * truly available, a la thundering herd. The cost of a | 
|  | 376 | * collision is great, but given the expected load of the | 
|  | 377 | * table, they should be so rare as to be outweighed by the | 
|  | 378 | * benefits from the saved space. | 
|  | 379 | * | 
|  | 380 | * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the | 
|  | 381 | * primary users of these fields, and in mm/page_alloc.c | 
|  | 382 | * free_area_init_core() performs the initialization of them. | 
|  | 383 | */ | 
|  | 384 | wait_queue_head_t	* wait_table; | 
| Yasunori Goto | 02b694d | 2006-06-23 02:03:08 -0700 | [diff] [blame] | 385 | unsigned long		wait_table_hash_nr_entries; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 386 | unsigned long		wait_table_bits; | 
|  | 387 |  | 
|  | 388 | /* | 
|  | 389 | * Discontig memory support fields. | 
|  | 390 | */ | 
|  | 391 | struct pglist_data	*zone_pgdat; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 392 | /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ | 
|  | 393 | unsigned long		zone_start_pfn; | 
|  | 394 |  | 
| Dave Hansen | bdc8cb9 | 2005-10-29 18:16:53 -0700 | [diff] [blame] | 395 | /* | 
|  | 396 | * zone_start_pfn, spanned_pages and present_pages are all | 
|  | 397 | * protected by span_seqlock.  It is a seqlock because it has | 
|  | 398 | * to be read outside of zone->lock, and it is done in the main | 
|  | 399 | * allocator path.  But, it is written quite infrequently. | 
|  | 400 | * | 
|  | 401 | * The lock is declared along with zone->lock because it is | 
|  | 402 | * frequently read in proximity to zone->lock.  It's good to | 
|  | 403 | * give them a chance of being in the same cacheline. | 
|  | 404 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 405 | unsigned long		spanned_pages;	/* total size, including holes */ | 
|  | 406 | unsigned long		present_pages;	/* amount of memory (excluding holes) */ | 
|  | 407 |  | 
|  | 408 | /* | 
|  | 409 | * rarely used fields: | 
|  | 410 | */ | 
| Helge Deller | 15ad7cd | 2006-12-06 20:40:36 -0800 | [diff] [blame] | 411 | const char		*name; | 
| Ravikiran G Thirumalai | 22fc6ec | 2006-01-08 01:01:27 -0800 | [diff] [blame] | 412 | } ____cacheline_internodealigned_in_smp; | 
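/*
 * Illustrative sketch (not part of the original header): how pages_min/
 * pages_low/pages_high and lowmem_reserve[] above are combined.  The real
 * check is zone_watermark_ok() in mm/page_alloc.c (declared further down);
 * this simplified copy ignores the ALLOC_* flag adjustments and reads the
 * free count straight from vm_stat rather than via zone_page_state().
 */
static inline int example_watermark_ok(struct zone *z, int order,
                                       unsigned long mark, int classzone_idx)
{
        long free_pages = atomic_long_read(&z->vm_stat[NR_FREE_PAGES]);
        long min = mark;
        int o;

        if (free_pages <= min + (long)z->lowmem_reserve[classzone_idx])
                return 0;
        for (o = 0; o < order; o++) {
                /* blocks of this order are too small for the request */
                free_pages -= z->free_area[o].nr_free << o;
                min >>= 1;              /* require less headroom at higher orders */
                if (free_pages <= min)
                        return 0;
        }
        return 1;
}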
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 413 |  | 
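/*
 * Illustrative sketch (not part of the original header): how the
 * wait_table fields in struct zone are used.  The real lookup is done in
 * mm/filemap.c (see the comment inside struct zone); hash_ptr() here is
 * assumed from <linux/hash.h>, which this header does not include, and the
 * function name is invented.
 */
static inline wait_queue_head_t *example_page_waitqueue(struct zone *zone,
                                                        struct page *page)
{
        return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}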
| David Rientjes | e815af9 | 2007-10-16 23:25:54 -0700 | [diff] [blame] | 414 | typedef enum { | 
|  | 415 | ZONE_ALL_UNRECLAIMABLE,		/* all pages pinned */ | 
|  | 416 | ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */ | 
| David Rientjes | 098d7f1 | 2007-10-16 23:25:55 -0700 | [diff] [blame] | 417 | ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */ | 
| David Rientjes | e815af9 | 2007-10-16 23:25:54 -0700 | [diff] [blame] | 418 | } zone_flags_t; | 
|  | 419 |  | 
|  | 420 | static inline void zone_set_flag(struct zone *zone, zone_flags_t flag) | 
|  | 421 | { | 
|  | 422 | set_bit(flag, &zone->flags); | 
|  | 423 | } | 
| David Rientjes | d773ed6 | 2007-10-16 23:26:01 -0700 | [diff] [blame] | 424 |  | 
|  | 425 | static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag) | 
|  | 426 | { | 
|  | 427 | return test_and_set_bit(flag, &zone->flags); | 
|  | 428 | } | 
|  | 429 |  | 
| David Rientjes | e815af9 | 2007-10-16 23:25:54 -0700 | [diff] [blame] | 430 | static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag) | 
|  | 431 | { | 
|  | 432 | clear_bit(flag, &zone->flags); | 
|  | 433 | } | 
|  | 434 |  | 
|  | 435 | static inline int zone_is_all_unreclaimable(const struct zone *zone) | 
|  | 436 | { | 
|  | 437 | return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags); | 
|  | 438 | } | 
| David Rientjes | d773ed6 | 2007-10-16 23:26:01 -0700 | [diff] [blame] | 439 |  | 
| David Rientjes | e815af9 | 2007-10-16 23:25:54 -0700 | [diff] [blame] | 440 | static inline int zone_is_reclaim_locked(const struct zone *zone) | 
|  | 441 | { | 
|  | 442 | return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); | 
|  | 443 | } | 
| David Rientjes | d773ed6 | 2007-10-16 23:26:01 -0700 | [diff] [blame] | 444 |  | 
| David Rientjes | 098d7f1 | 2007-10-16 23:25:55 -0700 | [diff] [blame] | 445 | static inline int zone_is_oom_locked(const struct zone *zone) | 
|  | 446 | { | 
|  | 447 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); | 
|  | 448 | } | 
| David Rientjes | e815af9 | 2007-10-16 23:25:54 -0700 | [diff] [blame] | 449 |  | 
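/*
 * Illustrative sketch (not part of the original header): the typical
 * pattern for the flag helpers above, modelled on how the reclaim path
 * keeps two CPUs from reclaiming the same zone at once.  The function
 * name is invented here.
 */
static inline int example_try_zone_reclaim(struct zone *zone)
{
        if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
                return 0;               /* someone else is already reclaiming */
        /* ... shrink the zone here ... */
        zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
        return 1;
}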
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 450 | /* | 
|  | 451 | * The "priority" of VM scanning is how much of the queues we will scan in one | 
|  | 452 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the | 
|  | 453 | * queues ("queue_length >> 12") during an aging round. | 
|  | 454 | */ | 
|  | 455 | #define DEF_PRIORITY 12 | 
|  | 456 |  | 
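/*
 * Illustrative sketch (not part of the original header): the scan amount
 * implied by a priority.  At DEF_PRIORITY only 1/4096th of an LRU list is
 * scanned per pass; as reclaim gets more desperate the priority drops
 * towards 0, where the whole list is scanned.  The helper name is invented.
 */
static inline unsigned long example_scan_amount(unsigned long lru_len, int priority)
{
        return lru_len >> priority;     /* e.g. 1048576 >> 12 == 256 pages */
}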
| Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 457 | /* Maximum number of zones on a zonelist */ | 
|  | 458 | #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) | 
|  | 459 |  | 
|  | 460 | #ifdef CONFIG_NUMA | 
| Christoph Lameter | 523b945 | 2007-10-16 01:25:37 -0700 | [diff] [blame] | 461 |  | 
|  | 462 | /* | 
|  | 463 | * The NUMA zonelists are doubled because we need zonelists that restrict the | 
|  | 464 | * allocations to a single node for GFP_THISNODE. | 
|  | 465 | * | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 466 | * [0]	: Zonelist with fallback | 
|  | 467 | * [1]	: No fallback (GFP_THISNODE) | 
| Christoph Lameter | 523b945 | 2007-10-16 01:25:37 -0700 | [diff] [blame] | 468 | */ | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 469 | #define MAX_ZONELISTS 2 | 
| Christoph Lameter | 523b945 | 2007-10-16 01:25:37 -0700 | [diff] [blame] | 470 |  | 
|  | 471 |  | 
| Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 472 | /* | 
|  | 473 | * We cache key information from each zonelist for smaller cache | 
|  | 474 | * footprint when scanning for free pages in get_page_from_freelist(). | 
|  | 475 | * | 
|  | 476 | * 1) The BITMAP fullzones tracks which zones in a zonelist have come | 
|  | 477 | *    up short of free memory since the last time (last_fullzone_zap) | 
|  | 478 | *    we zero'd fullzones. | 
|  | 479 | * 2) The array z_to_n[] maps each zone in the zonelist to its node | 
|  | 480 | *    id, so that we can efficiently evaluate whether that node is | 
|  | 481 | *    set in the current task's mems_allowed. | 
|  | 482 | * | 
|  | 483 | * Both fullzones and z_to_n[] are one-to-one with the zonelist, | 
|  | 484 | * indexed by a zones offset in the zonelist zones[] array. | 
|  | 485 | * | 
|  | 486 | * The get_page_from_freelist() routine does two scans.  During the | 
|  | 487 | * first scan, we skip zones whose corresponding bit in 'fullzones' | 
|  | 488 | * is set or whose corresponding node in current->mems_allowed (which | 
|  | 489 | * comes from cpusets) is not set.  During the second scan, we bypass | 
|  | 490 | * this zonelist_cache, to ensure we look methodically at each zone. | 
|  | 491 | * | 
|  | 492 | * Once per second, we zero out (zap) fullzones, forcing us to | 
|  | 493 | * reconsider nodes that might have regained more free memory. | 
|  | 494 | * The field last_full_zap is the time we last zapped fullzones. | 
|  | 495 | * | 
|  | 496 | * This mechanism reduces the amount of time we waste repeatedly | 
|  | 497 | * re-examining zones for free memory when they came up low on | 
|  | 498 | * memory only moments ago. | 
|  | 499 | * | 
|  | 500 | * The zonelist_cache struct members logically belong in struct | 
|  | 501 | * zonelist.  However, the mempolicy zonelists constructed for | 
|  | 502 | * MPOL_BIND are intentionally variable length (and usually much | 
|  | 503 | * shorter).  A general purpose mechanism for handling structs with | 
|  | 504 | * multiple variable length members is more mechanism than we want | 
|  | 505 | * here.  We resort to some special case hackery instead. | 
|  | 506 | * | 
|  | 507 | * The MPOL_BIND zonelists don't need this zonelist_cache (in good | 
|  | 508 | * part because they are shorter), so we put the fixed length stuff | 
|  | 509 | * at the front of the zonelist struct, ending in a variable length | 
|  | 510 | * zones[], as is needed by MPOL_BIND. | 
|  | 511 | * | 
|  | 512 | * Then we put the optional zonelist cache on the end of the zonelist | 
|  | 513 | * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in | 
|  | 514 | * the fixed length portion at the front of the struct.  This pointer | 
|  | 515 | * both enables us to find the zonelist cache, and in the case of | 
|  | 516 | * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL) | 
|  | 517 | * to know that the zonelist cache is not there. | 
|  | 518 | * | 
|  | 519 | * The end result is that struct zonelists come in two flavors: | 
|  | 520 | *  1) The full, fixed length version, shown below, and | 
|  | 521 | *  2) The custom zonelists for MPOL_BIND. | 
|  | 522 | * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache. | 
|  | 523 | * | 
|  | 524 | * Even though there may be multiple CPU cores on a node modifying | 
|  | 525 | * fullzones or last_full_zap in the same zonelist_cache at the same | 
|  | 526 | * time, we don't lock it.  This is just hint data - if it is wrong now | 
|  | 527 | * and then, the allocator will still function, perhaps a bit slower. | 
|  | 528 | */ | 
|  | 529 |  | 
|  | 530 |  | 
|  | 531 | struct zonelist_cache { | 
| Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 532 | unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */ | 
| Paul Jackson | 7253f4e | 2006-12-06 20:31:49 -0800 | [diff] [blame] | 533 | DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */ | 
| Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 534 | unsigned long last_full_zap;		/* when last zap'd (jiffies) */ | 
|  | 535 | }; | 
|  | 536 | #else | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 537 | #define MAX_ZONELISTS 1 | 
| Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 538 | struct zonelist_cache; | 
|  | 539 | #endif | 
|  | 540 |  | 
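/*
 * Illustrative sketch (not part of the original header): the two cheap
 * tests the cache above enables on the first zonelist scan.  The real
 * helpers live in mm/page_alloc.c; the function name is invented here.
 */
#ifdef CONFIG_NUMA
static inline int example_zone_worth_trying(struct zonelist_cache *zlc,
                                            int zone_index,
                                            nodemask_t *allowednodes)
{
        if (test_bit(zone_index, zlc->fullzones))
                return 0;       /* recently found full, skip on first pass */
        return node_isset(zlc->z_to_n[zone_index], *allowednodes);
}
#endif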
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 541 | /* | 
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 542 | * This struct contains information about a zone in a zonelist. It is stored | 
|  | 543 | * here to avoid dereferences into large structures and lookups of tables | 
|  | 544 | */ | 
|  | 545 | struct zoneref { | 
|  | 546 | struct zone *zone;	/* Pointer to actual zone */ | 
|  | 547 | int zone_idx;		/* zone_idx(zoneref->zone) */ | 
|  | 548 | }; | 
|  | 549 |  | 
|  | 550 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 551 | * One allocation request operates on a zonelist. A zonelist | 
|  | 552 | * is a list of zones, the first one is the 'goal' of the | 
|  | 553 | * allocation, the other zones are fallback zones, in decreasing | 
|  | 554 | * priority. | 
|  | 555 | * | 
| Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 556 | * If zlcache_ptr is not NULL, then it is just the address of zlcache, | 
|  | 557 | * as explained above.  If zlcache_ptr is NULL, there is no zlcache. | 
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 558 | * | 
|  | 559 | * To speed the reading of the zonelist, the zonerefs contain the zone index | 
|  | 560 | * of the entry being read. Helper functions to access information given | 
|  | 561 | * a struct zoneref are | 
|  | 562 | * | 
|  | 563 | * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs | 
|  | 564 | * zonelist_zone_idx()	- Return the index of the zone for an entry | 
|  | 565 | * zonelist_node_idx()	- Return the index of the node for an entry | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 566 | */ | 
|  | 567 | struct zonelist { | 
| Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 568 | struct zonelist_cache *zlcache_ptr;		     // NULL or &zlcache | 
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 569 | struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; | 
| Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 570 | #ifdef CONFIG_NUMA | 
|  | 571 | struct zonelist_cache zlcache;			     // optional ... | 
|  | 572 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 573 | }; | 
|  | 574 |  | 
| Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 575 | #ifdef CONFIG_ARCH_POPULATES_NODE_MAP | 
|  | 576 | struct node_active_region { | 
|  | 577 | unsigned long start_pfn; | 
|  | 578 | unsigned long end_pfn; | 
|  | 579 | int nid; | 
|  | 580 | }; | 
|  | 581 | #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 582 |  | 
| Heiko Carstens | 5b99cd0 | 2006-09-27 01:50:01 -0700 | [diff] [blame] | 583 | #ifndef CONFIG_DISCONTIGMEM | 
|  | 584 | /* The array of struct pages - for discontigmem use pgdat->lmem_map */ | 
|  | 585 | extern struct page *mem_map; | 
|  | 586 | #endif | 
|  | 587 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 588 | /* | 
|  | 589 | * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM | 
|  | 590 | * (mostly NUMA machines?) to describe a higher-level unit of memory (a | 
|  | 591 | * node) than a single zone denotes. | 
|  | 592 | * | 
|  | 593 | * On NUMA machines, each NUMA node would have a pg_data_t to describe | 
|  | 594 | * its memory layout. | 
|  | 595 | * | 
|  | 596 | * Memory statistics and page replacement data structures are maintained on a | 
|  | 597 | * per-zone basis. | 
|  | 598 | */ | 
|  | 599 | struct bootmem_data; | 
|  | 600 | typedef struct pglist_data { | 
|  | 601 | struct zone node_zones[MAX_NR_ZONES]; | 
| Christoph Lameter | 523b945 | 2007-10-16 01:25:37 -0700 | [diff] [blame] | 602 | struct zonelist node_zonelists[MAX_ZONELISTS]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 603 | int nr_zones; | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 604 | #ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 605 | struct page *node_mem_map; | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 606 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 
|  | 607 | struct page_cgroup *node_page_cgroup; | 
|  | 608 | #endif | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 609 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 610 | struct bootmem_data *bdata; | 
| Dave Hansen | 208d54e | 2005-10-29 18:16:52 -0700 | [diff] [blame] | 611 | #ifdef CONFIG_MEMORY_HOTPLUG | 
|  | 612 | /* | 
|  | 613 | * Must be held any time you expect node_start_pfn, node_present_pages | 
|  | 614 | * or node_spanned_pages to stay constant.  Holding this will also | 
|  | 615 | * guarantee that any pfn_valid() stays that way. | 
|  | 616 | * | 
|  | 617 | * Nests above zone->lock and zone->span_seqlock. | 
|  | 618 | */ | 
|  | 619 | spinlock_t node_size_lock; | 
|  | 620 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 621 | unsigned long node_start_pfn; | 
|  | 622 | unsigned long node_present_pages; /* total number of physical pages */ | 
|  | 623 | unsigned long node_spanned_pages; /* total size of physical page | 
|  | 624 | range, including holes */ | 
|  | 625 | int node_id; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 626 | wait_queue_head_t kswapd_wait; | 
|  | 627 | struct task_struct *kswapd; | 
|  | 628 | int kswapd_max_order; | 
|  | 629 | } pg_data_t; | 
|  | 630 |  | 
|  | 631 | #define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages) | 
|  | 632 | #define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages) | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 633 | #ifdef CONFIG_FLAT_NODE_MEM_MAP | 
| Dave Hansen | 408fde8 | 2005-06-23 00:07:37 -0700 | [diff] [blame] | 634 | #define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr)) | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 635 | #else | 
|  | 636 | #define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr)) | 
|  | 637 | #endif | 
| Dave Hansen | 408fde8 | 2005-06-23 00:07:37 -0700 | [diff] [blame] | 638 | #define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 639 |  | 
| Dave Hansen | 208d54e | 2005-10-29 18:16:52 -0700 | [diff] [blame] | 640 | #include <linux/memory_hotplug.h> | 
|  | 641 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 642 | void get_zone_counts(unsigned long *active, unsigned long *inactive, | 
|  | 643 | unsigned long *free); | 
|  | 644 | void build_all_zonelists(void); | 
|  | 645 | void wakeup_kswapd(struct zone *zone, int order); | 
|  | 646 | int zone_watermark_ok(struct zone *z, int order, unsigned long mark, | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 647 | int classzone_idx, int alloc_flags); | 
| Dave Hansen | a2f3aa0 | 2007-01-10 23:15:30 -0800 | [diff] [blame] | 648 | enum memmap_context { | 
|  | 649 | MEMMAP_EARLY, | 
|  | 650 | MEMMAP_HOTPLUG, | 
|  | 651 | }; | 
| Yasunori Goto | 718127c | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 652 | extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, | 
| Dave Hansen | a2f3aa0 | 2007-01-10 23:15:30 -0800 | [diff] [blame] | 653 | unsigned long size, | 
|  | 654 | enum memmap_context context); | 
| Yasunori Goto | 718127c | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 655 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 656 | #ifdef CONFIG_HAVE_MEMORY_PRESENT | 
|  | 657 | void memory_present(int nid, unsigned long start, unsigned long end); | 
|  | 658 | #else | 
|  | 659 | static inline void memory_present(int nid, unsigned long start, unsigned long end) {} | 
|  | 660 | #endif | 
|  | 661 |  | 
|  | 662 | #ifdef CONFIG_NEED_NODE_MEMMAP_SIZE | 
|  | 663 | unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); | 
|  | 664 | #endif | 
|  | 665 |  | 
|  | 666 | /* | 
|  | 667 | * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc. | 
|  | 668 | */ | 
|  | 669 | #define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones) | 
|  | 670 |  | 
| Con Kolivas | f3fe651 | 2006-01-06 00:11:15 -0800 | [diff] [blame] | 671 | static inline int populated_zone(struct zone *zone) | 
|  | 672 | { | 
|  | 673 | return (!!zone->present_pages); | 
|  | 674 | } | 
|  | 675 |  | 
| Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 676 | extern int movable_zone; | 
|  | 677 |  | 
|  | 678 | static inline int zone_movable_is_highmem(void) | 
|  | 679 | { | 
|  | 680 | #if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP) | 
|  | 681 | return movable_zone == ZONE_HIGHMEM; | 
|  | 682 | #else | 
|  | 683 | return 0; | 
|  | 684 | #endif | 
|  | 685 | } | 
|  | 686 |  | 
| Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 687 | static inline int is_highmem_idx(enum zone_type idx) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 688 | { | 
| Christoph Lameter | e53ef38 | 2006-09-25 23:31:14 -0700 | [diff] [blame] | 689 | #ifdef CONFIG_HIGHMEM | 
| Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 690 | return (idx == ZONE_HIGHMEM || | 
|  | 691 | (idx == ZONE_MOVABLE && zone_movable_is_highmem())); | 
| Christoph Lameter | e53ef38 | 2006-09-25 23:31:14 -0700 | [diff] [blame] | 692 | #else | 
|  | 693 | return 0; | 
|  | 694 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 695 | } | 
|  | 696 |  | 
| Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 697 | static inline int is_normal_idx(enum zone_type idx) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 698 | { | 
|  | 699 | return (idx == ZONE_NORMAL); | 
|  | 700 | } | 
| Nick Piggin | 9328b8f | 2006-01-06 00:11:10 -0800 | [diff] [blame] | 701 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 702 | /** | 
|  | 703 | * is_highmem - helper function to quickly check if a struct zone is a | 
|  | 704 | *              highmem zone or not.  This is an attempt to keep references | 
|  | 705 | *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. | 
|  | 706 | * @zone - pointer to struct zone variable | 
|  | 707 | */ | 
|  | 708 | static inline int is_highmem(struct zone *zone) | 
|  | 709 | { | 
| Christoph Lameter | e53ef38 | 2006-09-25 23:31:14 -0700 | [diff] [blame] | 710 | #ifdef CONFIG_HIGHMEM | 
| Harvey Harrison | ddc81ed | 2008-04-28 02:12:07 -0700 | [diff] [blame] | 711 | int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones; | 
|  | 712 | return zone_off == ZONE_HIGHMEM * sizeof(*zone) || | 
|  | 713 | (zone_off == ZONE_MOVABLE * sizeof(*zone) && | 
|  | 714 | zone_movable_is_highmem()); | 
| Christoph Lameter | e53ef38 | 2006-09-25 23:31:14 -0700 | [diff] [blame] | 715 | #else | 
|  | 716 | return 0; | 
|  | 717 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 718 | } | 
|  | 719 |  | 
|  | 720 | static inline int is_normal(struct zone *zone) | 
|  | 721 | { | 
|  | 722 | return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL; | 
|  | 723 | } | 
|  | 724 |  | 
| Nick Piggin | 9328b8f | 2006-01-06 00:11:10 -0800 | [diff] [blame] | 725 | static inline int is_dma32(struct zone *zone) | 
|  | 726 | { | 
| Christoph Lameter | fb0e794 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 727 | #ifdef CONFIG_ZONE_DMA32 | 
| Nick Piggin | 9328b8f | 2006-01-06 00:11:10 -0800 | [diff] [blame] | 728 | return zone == zone->zone_pgdat->node_zones + ZONE_DMA32; | 
| Christoph Lameter | fb0e794 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 729 | #else | 
|  | 730 | return 0; | 
|  | 731 | #endif | 
| Nick Piggin | 9328b8f | 2006-01-06 00:11:10 -0800 | [diff] [blame] | 732 | } | 
|  | 733 |  | 
|  | 734 | static inline int is_dma(struct zone *zone) | 
|  | 735 | { | 
| Christoph Lameter | 4b51d66 | 2007-02-10 01:43:10 -0800 | [diff] [blame] | 736 | #ifdef CONFIG_ZONE_DMA | 
| Nick Piggin | 9328b8f | 2006-01-06 00:11:10 -0800 | [diff] [blame] | 737 | return zone == zone->zone_pgdat->node_zones + ZONE_DMA; | 
| Christoph Lameter | 4b51d66 | 2007-02-10 01:43:10 -0800 | [diff] [blame] | 738 | #else | 
|  | 739 | return 0; | 
|  | 740 | #endif | 
| Nick Piggin | 9328b8f | 2006-01-06 00:11:10 -0800 | [diff] [blame] | 741 | } | 
|  | 742 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 743 | /* These two functions are used to setup the per zone pages min values */ | 
|  | 744 | struct ctl_table; | 
|  | 745 | struct file; | 
|  | 746 | int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *, | 
|  | 747 | void __user *, size_t *, loff_t *); | 
|  | 748 | extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; | 
|  | 749 | int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *, | 
|  | 750 | void __user *, size_t *, loff_t *); | 
| Rohit Seth | 8ad4b1f | 2006-01-08 01:00:40 -0800 | [diff] [blame] | 751 | int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *, | 
|  | 752 | void __user *, size_t *, loff_t *); | 
| Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 753 | int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, | 
|  | 754 | struct file *, void __user *, size_t *, loff_t *); | 
| Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 755 | int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, | 
|  | 756 | struct file *, void __user *, size_t *, loff_t *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 757 |  | 
| KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 758 | extern int numa_zonelist_order_handler(struct ctl_table *, int, | 
|  | 759 | struct file *, void __user *, size_t *, loff_t *); | 
|  | 760 | extern char numa_zonelist_order[]; | 
|  | 761 | #define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */ | 
|  | 762 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 763 | #include <linux/topology.h> | 
|  | 764 | /* Returns the number of the current Node. */ | 
| Andi Kleen | 69d81fc | 2005-11-05 17:25:53 +0100 | [diff] [blame] | 765 | #ifndef numa_node_id | 
| Ingo Molnar | 39c715b | 2005-06-21 17:14:34 -0700 | [diff] [blame] | 766 | #define numa_node_id()		(cpu_to_node(raw_smp_processor_id())) | 
| Andi Kleen | 69d81fc | 2005-11-05 17:25:53 +0100 | [diff] [blame] | 767 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 768 |  | 
| Dave Hansen | 93b7504 | 2005-06-23 00:07:47 -0700 | [diff] [blame] | 769 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 770 |  | 
|  | 771 | extern struct pglist_data contig_page_data; | 
|  | 772 | #define NODE_DATA(nid)		(&contig_page_data) | 
|  | 773 | #define NODE_MEM_MAP(nid)	mem_map | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 774 |  | 
| Dave Hansen | 93b7504 | 2005-06-23 00:07:47 -0700 | [diff] [blame] | 775 | #else /* CONFIG_NEED_MULTIPLE_NODES */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 776 |  | 
|  | 777 | #include <asm/mmzone.h> | 
|  | 778 |  | 
| Dave Hansen | 93b7504 | 2005-06-23 00:07:47 -0700 | [diff] [blame] | 779 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ | 
| Dave Hansen | 348f8b6 | 2005-06-23 00:07:40 -0700 | [diff] [blame] | 780 |  | 
| KAMEZAWA Hiroyuki | 95144c7 | 2006-03-27 01:16:02 -0800 | [diff] [blame] | 781 | extern struct pglist_data *first_online_pgdat(void); | 
|  | 782 | extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); | 
|  | 783 | extern struct zone *next_zone(struct zone *zone); | 
| KAMEZAWA Hiroyuki | 8357f86 | 2006-03-27 01:15:57 -0800 | [diff] [blame] | 784 |  | 
|  | 785 | /** | 
| Fernando Luis Vazquez Cao | 12d15f0 | 2008-05-23 13:05:01 -0700 | [diff] [blame] | 786 | * for_each_online_pgdat - helper macro to iterate over all online nodes | 
| KAMEZAWA Hiroyuki | 8357f86 | 2006-03-27 01:15:57 -0800 | [diff] [blame] | 787 | * @pgdat - pointer to a pg_data_t variable | 
|  | 788 | */ | 
|  | 789 | #define for_each_online_pgdat(pgdat)			\ | 
|  | 790 | for (pgdat = first_online_pgdat();		\ | 
|  | 791 | pgdat;					\ | 
|  | 792 | pgdat = next_online_pgdat(pgdat)) | 
| KAMEZAWA Hiroyuki | 8357f86 | 2006-03-27 01:15:57 -0800 | [diff] [blame] | 793 | /** | 
|  | 794 | * for_each_zone - helper macro to iterate over all memory zones | 
|  | 795 | * @zone - pointer to struct zone variable | 
|  | 796 | * | 
|  | 797 | * The user only needs to declare the zone variable, for_each_zone | 
|  | 798 | * fills it in. | 
|  | 799 | */ | 
|  | 800 | #define for_each_zone(zone)			        \ | 
|  | 801 | for (zone = (first_online_pgdat())->node_zones; \ | 
|  | 802 | zone;					\ | 
|  | 803 | zone = next_zone(zone)) | 
|  | 804 |  | 
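/*
 * Illustrative sketch (not part of the original header): typical use of
 * the iterators above, walking every zone in the system the way the
 * reporting code does.  populated_zone() and vm_stat are defined earlier
 * in this file; the function name is invented here.
 */
static inline unsigned long example_total_free_pages(void)
{
        struct zone *zone;
        unsigned long free = 0;

        for_each_zone(zone) {
                if (!populated_zone(zone))
                        continue;
                free += atomic_long_read(&zone->vm_stat[NR_FREE_PAGES]);
        }
        return free;
}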
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 805 | static inline struct zone *zonelist_zone(struct zoneref *zoneref) | 
|  | 806 | { | 
|  | 807 | return zoneref->zone; | 
|  | 808 | } | 
|  | 809 |  | 
|  | 810 | static inline int zonelist_zone_idx(struct zoneref *zoneref) | 
|  | 811 | { | 
|  | 812 | return zoneref->zone_idx; | 
|  | 813 | } | 
|  | 814 |  | 
|  | 815 | static inline int zonelist_node_idx(struct zoneref *zoneref) | 
|  | 816 | { | 
|  | 817 | #ifdef CONFIG_NUMA | 
|  | 818 | /* zone_to_nid not available in this context */ | 
|  | 819 | return zoneref->zone->node; | 
|  | 820 | #else | 
|  | 821 | return 0; | 
|  | 822 | #endif /* CONFIG_NUMA */ | 
|  | 823 | } | 
|  | 824 |  | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 825 | /** | 
|  | 826 | * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point | 
|  | 827 | * @z - The cursor used as a starting point for the search | 
|  | 828 | * @highest_zoneidx - The zone index of the highest zone to return | 
|  | 829 | * @nodes - An optional nodemask to filter the zonelist with | 
|  | 830 | * @zone - The first suitable zone found is returned via this parameter | 
|  | 831 | * | 
|  | 832 | * This function returns the next zone at or below a given zone index that is | 
|  | 833 | * within the allowed nodemask using a cursor as the starting point for the | 
| Mel Gorman | 5bead2a | 2008-09-13 02:33:19 -0700 | [diff] [blame] | 834 | * search. The zoneref returned is a cursor that represents the current zone | 
|  | 835 | * being examined. It should be advanced by one before calling | 
|  | 836 | * next_zones_zonelist again. | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 837 | */ | 
|  | 838 | struct zoneref *next_zones_zonelist(struct zoneref *z, | 
|  | 839 | enum zone_type highest_zoneidx, | 
|  | 840 | nodemask_t *nodes, | 
|  | 841 | struct zone **zone); | 
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 842 |  | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 843 | /** | 
|  | 844 | * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist | 
|  | 845 | * @zonelist - The zonelist to search for a suitable zone | 
|  | 846 | * @highest_zoneidx - The zone index of the highest zone to return | 
|  | 847 | * @nodes - An optional nodemask to filter the zonelist with | 
|  | 848 | * @zone - The first suitable zone found is returned via this parameter | 
|  | 849 | * | 
|  | 850 | * This function returns the first zone at or below a given zone index that is | 
|  | 851 | * within the allowed nodemask. The zoneref returned is a cursor that can be | 
| Mel Gorman | 5bead2a | 2008-09-13 02:33:19 -0700 | [diff] [blame] | 852 | * used to iterate the zonelist with next_zones_zonelist by advancing it by | 
|  | 853 | * one before calling. | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 854 | */ | 
| Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 855 | static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 856 | enum zone_type highest_zoneidx, | 
|  | 857 | nodemask_t *nodes, | 
|  | 858 | struct zone **zone) | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 859 | { | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 860 | return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes, | 
|  | 861 | zone); | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 862 | } | 
|  | 863 |  | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 864 | /** | 
|  | 865 | * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask | 
|  | 866 | * @zone: The current zone in the iterator | 
|  | 867 | * @z: The current pointer within zonelist->_zonerefs being iterated | 
|  | 868 | * @zlist: The zonelist being iterated | 
|  | 869 | * @highidx: The zone index of the highest zone to return | 
|  | 870 | * @nodemask: Nodemask allowed by the allocator | 
|  | 871 | * | 
|  | 872 | * This iterator iterates through all zones at or below a given zone index and | 
|  | 873 | * within a given nodemask. | 
|  | 874 | */ | 
|  | 875 | #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ | 
|  | 876 | for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\ | 
|  | 877 | zone;							\ | 
| Mel Gorman | 5bead2a | 2008-09-13 02:33:19 -0700 | [diff] [blame] | 878 | z = next_zones_zonelist(++z, highidx, nodemask, &zone)) | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 879 |  | 
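/*
 * Illustrative sketch, not part of the original header: summing the pages
 * present in every zone the iterator visits, restricted to an allowed
 * nodemask.  The function name is hypothetical.
 */
static inline unsigned long example_present_pages(struct zonelist *zonelist,
						  enum zone_type highidx,
						  nodemask_t *nodes)
{
	struct zoneref *z;
	struct zone *zone;
	unsigned long pages = 0;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, nodes)
		pages += zone->present_pages;

	return pages;
}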
|  | 880 | /** | 
|  | 881 | * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index | 
|  | 882 | * @zone: The current zone in the iterator | 
|  | 883 | * @z: The current pointer within zonelist->_zonerefs being iterated | 
|  | 884 | * @zlist: The zonelist being iterated | 
|  | 885 | * @highidx: The zone index of the highest zone to return | 
|  | 886 | * | 
|  | 887 | * This iterator iterates through all zones at or below a given zone index. | 
|  | 888 | */ | 
|  | 889 | #define for_each_zone_zonelist(zone, z, zlist, highidx) \ | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 890 | for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL) | 
| Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 891 |  | 
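/*
 * Illustrative sketch, not part of the original header: counting how many
 * zones a zonelist offers at or below a given index, using the
 * nodemask-free wrapper above.  The function name is hypothetical.
 */
static inline int example_count_zones(struct zonelist *zonelist,
				      enum zone_type highidx)
{
	struct zoneref *z;
	struct zone *zone;
	int nr = 0;

	for_each_zone_zonelist(zone, z, zonelist, highidx)
		nr++;

	return nr;
}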
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 892 | #ifdef CONFIG_SPARSEMEM | 
|  | 893 | #include <asm/sparsemem.h> | 
|  | 894 | #endif | 
|  | 895 |  | 
| Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 896 | #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ | 
|  | 897 | !defined(CONFIG_ARCH_POPULATES_NODE_MAP) | 
| Andrew Morton | b454456 | 2008-04-28 02:12:39 -0700 | [diff] [blame] | 898 | static inline unsigned long early_pfn_to_nid(unsigned long pfn) | 
|  | 899 | { | 
|  | 900 | return 0; | 
|  | 901 | } | 
| Andy Whitcroft | b159d43 | 2005-06-23 00:07:52 -0700 | [diff] [blame] | 902 | #endif | 
|  | 903 |  | 
| Andy Whitcroft | 2bdaf11 | 2006-01-06 00:10:53 -0800 | [diff] [blame] | 904 | #ifdef CONFIG_FLATMEM | 
|  | 905 | #define pfn_to_nid(pfn)		(0) | 
|  | 906 | #endif | 
|  | 907 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 908 | #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) | 
|  | 909 | #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) | 
|  | 910 |  | 
|  | 911 | #ifdef CONFIG_SPARSEMEM | 
|  | 912 |  | 
|  | 913 | /* | 
|  | 914 | * SECTIONS_SHIFT		number of bits needed to store a section number | 
|  | 915 | * | 
|  | 916 | * PA_SECTION_SHIFT		physical address to/from section number | 
|  | 917 | * PFN_SECTION_SHIFT		pfn to/from section number | 
|  | 918 | */ | 
|  | 919 | #define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) | 
|  | 920 |  | 
|  | 921 | #define PA_SECTION_SHIFT	(SECTION_SIZE_BITS) | 
|  | 922 | #define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT) | 
|  | 923 |  | 
|  | 924 | #define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT) | 
|  | 925 |  | 
|  | 926 | #define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT) | 
|  | 927 | #define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1)) | 
|  | 928 |  | 
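/*
 * Worked example (the values are illustrative, not mandated here; the real
 * numbers come from asm/sparsemem.h): with SECTION_SIZE_BITS = 27 and
 * PAGE_SHIFT = 12,
 *
 *	PA_SECTION_SHIFT  = 27                (a section covers 128 MiB)
 *	PFN_SECTION_SHIFT = 27 - 12 = 15
 *	PAGES_PER_SECTION = 1 << 15 = 32768 pages
 *
 * and with MAX_PHYSMEM_BITS = 44, SECTIONS_SHIFT = 44 - 27 = 17, so
 * NR_MEM_SECTIONS = 131072 possible sections.
 */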
| Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 929 | #define SECTION_BLOCKFLAGS_BITS \ | 
| Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 930 | ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS) | 
| Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 931 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 932 | #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS | 
|  | 933 | #error Allocator MAX_ORDER exceeds SECTION_SIZE | 
|  | 934 | #endif | 
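/*
 * Worked example of the check above (illustrative values): with
 * MAX_ORDER = 11 and PAGE_SHIFT = 12 the largest buddy block spans
 * 2^(10 + 12) bytes = 4 MiB, which must fit inside the 2^SECTION_SIZE_BITS
 * bytes covered by one section (128 MiB in the worked example above).
 */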
|  | 935 |  | 
|  | 936 | struct page; | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 937 | struct page_cgroup; | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 938 | struct mem_section { | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 939 | /* | 
|  | 940 | * This is, logically, a pointer to an array of struct | 
|  | 941 | * pages.  However, it is stored with some other magic. | 
|  | 942 | * (see sparse.c::sparse_init_one_section()) | 
|  | 943 | * | 
| Andy Whitcroft | 30c253e | 2006-06-23 02:03:41 -0700 | [diff] [blame] | 944 | * Additionally, during early boot we encode the node id of | 
|  | 945 | * the section's location here to guide allocation. | 
|  | 946 | * (see sparse.c::memory_present()) | 
|  | 947 | * | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 948 | * Storing this as an unsigned long at least forces a cast | 
|  | 949 | * before it can be misused as a plain pointer. | 
|  | 950 | */ | 
|  | 951 | unsigned long section_mem_map; | 
| Mel Gorman | 5c0e306 | 2007-10-16 01:25:56 -0700 | [diff] [blame] | 952 |  | 
|  | 953 | /* See declaration of similar field in struct zone */ | 
|  | 954 | unsigned long *pageblock_flags; | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 955 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 
|  | 956 | /* | 
|  | 957 | * With SPARSEMEM, pgdat does not carry the page_cgroup pointer; | 
|  | 958 | * it is kept here in the section instead. (see memcontrol.h/page_cgroup.h) | 
|  | 959 | */ | 
|  | 960 | struct page_cgroup *page_cgroup; | 
|  | 961 | unsigned long pad; | 
|  | 962 | #endif | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 963 | }; | 
|  | 964 |  | 
| Bob Picco | 3e34726 | 2005-09-03 15:54:28 -0700 | [diff] [blame] | 965 | #ifdef CONFIG_SPARSEMEM_EXTREME | 
|  | 966 | #define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section)) | 
| Bob Picco | 802f192 | 2005-09-03 15:54:26 -0700 | [diff] [blame] | 967 | #else | 
| Bob Picco | 3e34726 | 2005-09-03 15:54:28 -0700 | [diff] [blame] | 968 | #define SECTIONS_PER_ROOT	1 | 
|  | 969 | #endif | 
| Bob Picco | 802f192 | 2005-09-03 15:54:26 -0700 | [diff] [blame] | 970 |  | 
| Bob Picco | 3e34726 | 2005-09-03 15:54:28 -0700 | [diff] [blame] | 971 | #define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT) | 
|  | 972 | #define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT) | 
|  | 973 | #define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1) | 
|  | 974 |  | 
|  | 975 | #ifdef CONFIG_SPARSEMEM_EXTREME | 
|  | 976 | extern struct mem_section *mem_section[NR_SECTION_ROOTS]; | 
|  | 977 | #else | 
|  | 978 | extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; | 
|  | 979 | #endif | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 980 |  | 
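/*
 * Worked example (illustrative assumptions, not guarantees): on a 64-bit
 * build without the memory-controller fields, struct mem_section is two
 * unsigned-long-sized members, 16 bytes.  With PAGE_SIZE = 4096 that gives
 * SECTIONS_PER_ROOT = 4096 / 16 = 256 under SPARSEMEM_EXTREME, so the
 * 131072 sections of the earlier example need NR_SECTION_ROOTS = 512 root
 * pointers; roots whose sections are all absent simply stay NULL.
 */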
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 981 | static inline struct mem_section *__nr_to_section(unsigned long nr) | 
|  | 982 | { | 
| Bob Picco | 3e34726 | 2005-09-03 15:54:28 -0700 | [diff] [blame] | 983 | if (!mem_section[SECTION_NR_TO_ROOT(nr)]) | 
|  | 984 | return NULL; | 
|  | 985 | return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 986 | } | 
| Dave Hansen | 4ca644d | 2005-10-29 18:16:51 -0700 | [diff] [blame] | 987 | extern int __section_nr(struct mem_section *ms); | 
| Yasunori Goto | 0475327 | 2008-04-28 02:13:31 -0700 | [diff] [blame] | 988 | extern unsigned long usemap_size(void); | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 989 |  | 
|  | 990 | /* | 
|  | 991 | * We use the lower bits of the mem_map pointer to store | 
|  | 992 | * a little bit of information.  The pointer's 32-bit alignment | 
|  | 993 | * guarantees at least two free low bits, enough for the flags below. | 
|  | 994 | */ | 
|  | 995 | #define	SECTION_MARKED_PRESENT	(1UL<<0) | 
|  | 996 | #define SECTION_HAS_MEM_MAP	(1UL<<1) | 
|  | 997 | #define SECTION_MAP_LAST_BIT	(1UL<<2) | 
|  | 998 | #define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1)) | 
| Andy Whitcroft | 30c253e | 2006-06-23 02:03:41 -0700 | [diff] [blame] | 999 | #define SECTION_NID_SHIFT	2 | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1000 |  | 
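/*
 * Illustrative sketch, not part of the original header: roughly how the two
 * encodings that share section_mem_map are produced.  The real code lives in
 * mm/sparse.c, the helper names here are hypothetical, and the actual
 * mem_map encoding is more subtle than storing the bare pointer.
 */
static inline unsigned long example_encode_early_nid(int nid)
{
	/* early boot: node id above the flag bits, plus the "present" flag */
	return ((unsigned long)nid << SECTION_NID_SHIFT) | SECTION_MARKED_PRESENT;
}

static inline int example_decode_early_nid(struct mem_section *ms)
{
	return ms->section_mem_map >> SECTION_NID_SHIFT;
}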
|  | 1001 | static inline struct page *__section_mem_map_addr(struct mem_section *section) | 
|  | 1002 | { | 
|  | 1003 | unsigned long map = section->section_mem_map; | 
|  | 1004 | map &= SECTION_MAP_MASK; | 
|  | 1005 | return (struct page *)map; | 
|  | 1006 | } | 
|  | 1007 |  | 
| Andy Whitcroft | 540557b | 2007-10-16 01:24:11 -0700 | [diff] [blame] | 1008 | static inline int present_section(struct mem_section *section) | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1009 | { | 
| Bob Picco | 802f192 | 2005-09-03 15:54:26 -0700 | [diff] [blame] | 1010 | return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1011 | } | 
|  | 1012 |  | 
| Andy Whitcroft | 540557b | 2007-10-16 01:24:11 -0700 | [diff] [blame] | 1013 | static inline int present_section_nr(unsigned long nr) | 
|  | 1014 | { | 
|  | 1015 | return present_section(__nr_to_section(nr)); | 
|  | 1016 | } | 
|  | 1017 |  | 
|  | 1018 | static inline int valid_section(struct mem_section *section) | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1019 | { | 
| Bob Picco | 802f192 | 2005-09-03 15:54:26 -0700 | [diff] [blame] | 1020 | return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1021 | } | 
|  | 1022 |  | 
|  | 1023 | static inline int valid_section_nr(unsigned long nr) | 
|  | 1024 | { | 
|  | 1025 | return valid_section(__nr_to_section(nr)); | 
|  | 1026 | } | 
|  | 1027 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1028 | static inline struct mem_section *__pfn_to_section(unsigned long pfn) | 
|  | 1029 | { | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1030 | return __nr_to_section(pfn_to_section_nr(pfn)); | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1031 | } | 
|  | 1032 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1033 | static inline int pfn_valid(unsigned long pfn) | 
|  | 1034 | { | 
|  | 1035 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) | 
|  | 1036 | return 0; | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1037 | return valid_section(__nr_to_section(pfn_to_section_nr(pfn))); | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1038 | } | 
|  | 1039 |  | 
| Andy Whitcroft | 540557b | 2007-10-16 01:24:11 -0700 | [diff] [blame] | 1040 | static inline int pfn_present(unsigned long pfn) | 
|  | 1041 | { | 
|  | 1042 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) | 
|  | 1043 | return 0; | 
|  | 1044 | return present_section(__nr_to_section(pfn_to_section_nr(pfn))); | 
|  | 1045 | } | 
|  | 1046 |  | 
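/*
 * Illustrative sketch, not part of the original header: scanning a pfn range
 * and distinguishing sections that are merely "present" (declared at boot)
 * from those that are "valid" (mem_map actually allocated).  The function
 * name is hypothetical.
 */
static inline void example_scan_pfn_range(unsigned long start, unsigned long end,
					  unsigned long *present, unsigned long *valid)
{
	unsigned long pfn;

	*present = *valid = 0;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_present(pfn))
			(*present)++;
		if (pfn_valid(pfn))
			(*valid)++;
	}
}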
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1047 | /* | 
|  | 1048 | * These are _only_ used during initialisation, therefore they | 
|  | 1049 | * can use __initdata ...  Their names could be changed to make | 
|  | 1050 | * this restriction explicit. | 
|  | 1051 | */ | 
|  | 1052 | #ifdef CONFIG_NUMA | 
| Andy Whitcroft | 161599f | 2006-01-06 00:10:54 -0800 | [diff] [blame] | 1053 | #define pfn_to_nid(pfn)							\ | 
|  | 1054 | ({									\ | 
|  | 1055 | unsigned long __pfn_to_nid_pfn = (pfn);				\ | 
|  | 1056 | page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\ | 
|  | 1057 | }) | 
| Andy Whitcroft | 2bdaf11 | 2006-01-06 00:10:53 -0800 | [diff] [blame] | 1058 | #else | 
|  | 1059 | #define pfn_to_nid(pfn)		(0) | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1060 | #endif | 
|  | 1061 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1062 | #define early_pfn_valid(pfn)	pfn_valid(pfn) | 
|  | 1063 | void sparse_init(void); | 
|  | 1064 | #else | 
|  | 1065 | #define sparse_init()	do {} while (0) | 
| Dave Hansen | 28ae55c | 2005-09-03 15:54:29 -0700 | [diff] [blame] | 1066 | #define sparse_index_init(_sec, _nid)  do {} while (0) | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1067 | #endif /* CONFIG_SPARSEMEM */ | 
|  | 1068 |  | 
| Andy Whitcroft | 7516795 | 2006-10-21 10:24:14 -0700 | [diff] [blame] | 1069 | #ifdef CONFIG_NODES_SPAN_OTHER_NODES | 
|  | 1070 | #define early_pfn_in_nid(pfn, nid)	(early_pfn_to_nid(pfn) == (nid)) | 
|  | 1071 | #else | 
|  | 1072 | #define early_pfn_in_nid(pfn, nid)	(1) | 
|  | 1073 | #endif | 
|  | 1074 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1075 | #ifndef early_pfn_valid | 
|  | 1076 | #define early_pfn_valid(pfn)	(1) | 
|  | 1077 | #endif | 
|  | 1078 |  | 
|  | 1079 | void memory_present(int nid, unsigned long start, unsigned long end); | 
|  | 1080 | unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); | 
|  | 1081 |  | 
| Andy Whitcroft | 14e0729 | 2007-05-06 14:49:14 -0700 | [diff] [blame] | 1082 | /* | 
|  | 1083 | * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then | 
|  | 1084 | * we need to check pfn validity within that block. | 
|  | 1085 | * pfn_valid_within() should be used in this case; we optimise this away | 
|  | 1086 | * when we have no holes within a MAX_ORDER_NR_PAGES block. | 
|  | 1087 | */ | 
|  | 1088 | #ifdef CONFIG_HOLES_IN_ZONE | 
|  | 1089 | #define pfn_valid_within(pfn) pfn_valid(pfn) | 
|  | 1090 | #else | 
|  | 1091 | #define pfn_valid_within(pfn) (1) | 
|  | 1092 | #endif | 
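/*
 * Illustrative sketch, not part of the original header: the kind of check
 * pfn_valid_within() is meant for - walking the pages of one MAX_ORDER
 * block whose first pfn is already known to be valid.  The function name
 * is hypothetical.
 */
static inline int example_block_has_hole(unsigned long block_start_pfn)
{
	unsigned long pfn;

	for (pfn = block_start_pfn;
	     pfn < block_start_pfn + MAX_ORDER_NR_PAGES; pfn++)
		if (!pfn_valid_within(pfn))
			return 1;

	return 0;
}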
|  | 1093 |  | 
| Christoph Lameter | 9796547 | 2008-04-28 02:12:54 -0700 | [diff] [blame] | 1094 | #endif /* !__GENERATING_BOUNDS_H */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1095 | #endif /* !__ASSEMBLY__ */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1096 | #endif /* _LINUX_MMZONE_H */ |