#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <asm/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator.  */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
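/*
 * For example, with the default MAX_ORDER of 11 and 4KB pages, the
 * largest buddy allocation is 1 << (MAX_ORDER - 1) = 1024 pages,
 * i.e. 4MB, and MAX_ORDER_NR_PAGES is likewise 1024.
 */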

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3
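/*
 * For example, with 4KB pages an order-3 allocation is 8 contiguous
 * pages, i.e. 32KB; anything larger than that is considered costly.
 */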

struct free_area {
	struct list_head	free_list;
	unsigned long		nr_free;
};
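/*
 * The buddy allocator keeps one free_area per order: blocks of
 * 2^order contiguous pages sit on free_area[order].free_list, so a
 * free 64KB block (16 pages of 4KB) would live on free_area[4].
 */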

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_INACTIVE,
	NR_ACTIVE,
	NR_ANON_PAGES,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	/* Second 128 byte cacheline */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_VM_ZONE_STAT_ITEMS };

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	struct list_head list;	/* the list of pages */
};
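/*
 * The per-cpu lists work roughly as follows: a freed page goes onto
 * the list and count is incremented; once count reaches high, batch
 * pages are handed back to the buddy lists in one go (see
 * mm/page_alloc.c).
 */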

struct per_cpu_pageset {
	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
} ____cacheline_aligned_in_smp;

#ifdef CONFIG_NUMA
#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
#else
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif
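/*
 * Either way, zone_pcp() yields a struct per_cpu_pageset pointer, so
 * for example the hot per-cpu list for a given zone and cpu is
 * reached as
 *
 *	struct per_cpu_pages *pcp = &zone_pcp(zone, cpu)->pcp[0];
 */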

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	MAX_NR_ZONES
};

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits. See gfp_zone() in include/linux/gfp.h
 */
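/*
 * For example, GFP_KERNEL (no zone modifier) maps to the ZONE_NORMAL
 * zonelist, falling back to lower zones where configured, while
 * __GFP_DMA selects the ZONE_DMA zonelist and __GFP_HIGHMEM the
 * ZONE_HIGHMEM one.
 */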

/*
 * Count the active zones.  Note that defined(X) outside of #if and
 * family is not guaranteed to behave, so we #undef __ZONE_COUNT below
 * to ensure it cannot be used later.  Use __ZONE_COUNT to work out how
 * many shift bits we need.
 */
#define __ZONE_COUNT (			\
	  defined(CONFIG_ZONE_DMA)	\
	+ defined(CONFIG_ZONE_DMA32)	\
	+ 1				\
	+ defined(CONFIG_HIGHMEM)	\
	+ 1				\
)
#if __ZONE_COUNT < 2
#define ZONES_SHIFT 0
#elif __ZONE_COUNT <= 2
#define ZONES_SHIFT 1
#elif __ZONE_COUNT <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured, adjust calculation
#endif
#undef __ZONE_COUNT

struct zone {
	/* Fields commonly accessed by the page allocator */
	unsigned long		pages_min, pages_low, pages_high;
	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or will eventually be released, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This
	 * array is recalculated at runtime if the
	 * sysctl_lowmem_reserve_ratio sysctl changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];
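	/*
	 * Roughly: when an allocation whose "class" zone is i falls back
	 * to a lower zone j, the watermark check in zone_watermark_ok()
	 * adds zone j's lowmem_reserve[i] on top of the normal watermark,
	 * so the lower zone keeps that many pages in reserve for its own
	 * users.
	 */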

#ifdef CONFIG_NUMA
	int node;
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
	struct per_cpu_pageset	*pageset[NR_CPUS];
#else
	struct per_cpu_pageset	pageset[NR_CPUS];
#endif
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif
	struct free_area	free_area[MAX_ORDER];


	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long		nr_scan_active;
	unsigned long		nr_scan_inactive;
	unsigned long		pages_scanned;	   /* since last reclaim */
	int			all_unreclaimable; /* All pages pinned */

	/* A count of how many reclaimers are scanning this zone */
	atomic_t		reclaim_in_progress;

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * prev_priority holds the scanning priority for this zone.  It is
	 * defined as the scanning priority at which we achieved our reclaim
	 * target at the previous try_to_free_pages() or balance_pgdat()
	 * invocation.
	 *
	 * We use prev_priority as a measure of how much stress page reclaim is
	 * under - it drives the swappiness decision: whether to unmap mapped
	 * pages.
	 *
	 * Access to this field is quite racy even on uniprocessor.  But
	 * it is expected to average out OK.
	 */
	int prev_priority;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_hash_nr_entries == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t	* wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;
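	/*
	 * For example, wait_table_bits == 8 means a 256-entry table: a
	 * task waiting on a page sleeps on whichever queue the page
	 * hashes to, and re-checks the page state when woken.
	 */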

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * The lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	const char		*name;
} ____cacheline_internodealigned_in_smp;

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12
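/*
 * For example, an inactive list of 1,000,000 pages is scanned in
 * windows of roughly 1000000 >> 12 = 244 pages at DEF_PRIORITY; the
 * priority is lowered towards 0 (widening the window) when reclaim
 * is not making progress.
 */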

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA
/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_full_zap)
 *    we zeroed fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently evaluate whether that node is
 *    set in the current task's mems_allowed.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 * indexed by a zone's offset in the zonelist zones[] array.
 *
 * The get_page_from_freelist() routine does two scans.  During the
 * first scan, we skip zones whose corresponding bit in 'fullzones'
 * is set or whose corresponding node in current->mems_allowed (which
 * comes from cpusets) is not set.  During the second scan, we bypass
 * this zonelist_cache, to ensure we look methodically at each zone.
 *
 * Once per second, we zero out (zap) fullzones, forcing us to
 * reconsider nodes that might have regained more free memory.
 * The field last_full_zap is the time we last zapped fullzones.
 *
 * This mechanism reduces the amount of time we waste repeatedly
 * re-examining zones for free memory when they ran low on memory
 * only momentarily.
 *
 * The zonelist_cache struct members logically belong in struct
 * zonelist.  However, the mempolicy zonelists constructed for
 * MPOL_BIND are intentionally variable length (and usually much
 * shorter).  A general purpose mechanism for handling structs with
 * multiple variable length members is more mechanism than we want
 * here.  We resort to some special case hackery instead.
 *
 * The MPOL_BIND zonelists don't need this zonelist_cache (in good
 * part because they are shorter), so we put the fixed length stuff
 * at the front of the zonelist struct, ending in a variable length
 * zones[], as is needed by MPOL_BIND.
 *
 * Then we put the optional zonelist cache on the end of the zonelist
 * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
 * the fixed length portion at the front of the struct.  This pointer
 * both enables us to find the zonelist cache, and in the case of
 * MPOL_BIND zonelists (which just set zlcache_ptr to NULL) to know
 * that the zonelist cache is not there.
 *
 * The end result is that struct zonelists come in two flavors:
 *  1) The full, fixed length version, shown below, and
 *  2) The custom zonelists for MPOL_BIND.
 * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
 *
 * Even though there may be multiple CPU cores on a node modifying
 * fullzones or last_full_zap in the same zonelist_cache at the same
 * time, we don't lock it.  This is just hint data - if it is wrong now
 * and then, the allocator will still function, perhaps a bit slower.
 */


struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
#else
struct zonelist_cache;
#endif

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 */

struct zonelist {
	struct zonelist_cache *zlcache_ptr;		     // NULL or &zlcache
	struct zone *zones[MAX_ZONES_PER_ZONELIST + 1];      // NULL delimited
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;			     // optional ...
#endif
};
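/*
 * The zones[] array is NULL-terminated, so callers typically walk it
 * along these lines (consider_zone() standing in for whatever the
 * caller does with each zone):
 *
 *	struct zone **z;
 *
 *	for (z = zonelist->zones; *z; z++)
 *		consider_zone(*z);
 */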

#ifdef CONFIG_NUMA
/*
 * Only custom zonelists like MPOL_BIND need to be filtered as part of
 * policies. As described in the comment for struct zonelist_cache, these
 * zonelists will not have a zlcache so zlcache_ptr will not be set. Use
 * that to determine if the zonelist needs to be filtered or not.
 */
static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
{
	return !zonelist->zlcache_ptr;
}
#else
static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
{
	return 0;
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used on machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to describe a higher-level grouping of memory
 * than a single zone.
 *
 * On NUMA machines, each NUMA node has a pg_data_t describing its
 * memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_NR_ZONES];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	struct page *node_mem_map;
#endif
	struct bootmem_data *bdata;
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->span_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#include <linux/memory_hotplug.h>

void get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)
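/*
 * For example, zone_idx(&pgdat->node_zones[ZONE_NORMAL]) evaluates to
 * ZONE_NORMAL; the index is simply the pointer difference from the
 * start of the node's zone array.
 */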

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone: pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_idx = zone - zone->zone_pgdat->node_zones;
	return zone_idx == ZONE_HIGHMEM ||
		(zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
#else
	return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
	return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
	return 0;
#endif
}

/* These helpers are used to set up the per zone pages min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */

#include <linux/topology.h>
/* Returns the number of the current Node. */
#ifndef numa_node_id
#define numa_node_id()		(cpu_to_node(raw_smp_processor_id()))
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map
#define MAX_NODES_SHIFT		1

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat: pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone: pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if BITS_PER_LONG == 32
/*
 * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
 * There are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes.
 */
#define FLAGS_RESERVED		9

#elif BITS_PER_LONG == 64
/*
 * with 64 bit flags field, there's plenty of room.
 */
#define FLAGS_RESERVED		32

#else

#error BITS_PER_LONG not defined

#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
#define early_pfn_to_nid(pfn)  (0UL)
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#ifdef CONFIG_SPARSEMEM

/*
 * SECTIONS_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))
#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

struct page;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode the node id of
	 * the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2
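/*
 * During early boot, sparse.c::memory_present() encodes the node id
 * into the same word, roughly:
 *
 *	ms->section_mem_map = (nid << SECTION_NID_SHIFT) |
 *			      SECTION_MARKED_PRESENT;
 *
 * sparse_init() later replaces this with the real encoded mem_map
 * pointer.
 */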

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int section_has_mem_map(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
#define early_pfn_in_nid(pfn, nid)	(early_pfn_to_nid(pfn) == (nid))
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block,
 * then we need to check pfn validity within that block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */