#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <asm/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator.  */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
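
/*
 * Worked example (illustrative): with the default MAX_ORDER of 11 the
 * buddy allocator manages blocks of 2^0 .. 2^(MAX_ORDER - 1) pages, so
 * the largest block is MAX_ORDER_NR_PAGES == 1 << 10 == 1024 pages,
 * i.e. 4MB with a 4KB PAGE_SIZE.
 */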

struct free_area {
	struct list_head	free_list;
	unsigned long		nr_free;
};

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	NR_ANON_PAGES,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_SLAB,	/* Pages used by slab allocator */
	NR_PAGETABLE,	/* used for pagetables */
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_VM_ZONE_STAT_ITEMS };

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	struct list_head list;	/* the list of pages */
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
} ____cacheline_aligned_in_smp;

#ifdef CONFIG_NUMA
#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
#else
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif
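
/*
 * Usage sketch (illustrative only): zone_pcp() hides whether the pageset
 * is reached through a per-node pointer array (NUMA) or embedded in the
 * zone itself, so callers can write, e.g.:
 *
 *	struct per_cpu_pages *pcp = &zone_pcp(zone, smp_processor_id())->pcp[0];
 *	if (pcp->count >= pcp->high)
 *		;	/* drain pcp->batch pages back to the buddy lists */
 *
 * (index 0 is the hot list, 1 the cold list, as noted above)
 */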

#define ZONE_DMA		0
#define ZONE_DMA32		1
#define ZONE_NORMAL		2
#define ZONE_HIGHMEM		3

#define MAX_NR_ZONES		4	/* Sync this with ZONES_SHIFT */
#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */


/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits.  GFP_ZONEMASK defines which bits within
 * the gfp_mask should be considered as zone modifiers.  Each valid
 * combination of the zone modifier bits has a corresponding list
 * of zones (in node_zonelists).  Thus for two zone modifiers there
 * will be a maximum of 4 (2 ** 2) zonelists, for 3 modifiers there will
 * be 8 (2 ** 3) zonelists.  GFP_ZONETYPES defines the number of possible
 * combinations of zone modifiers in "zone modifier space".
 *
 * As an optimisation any zone modifier bits which are only valid when
 * no other zone modifier bits are set (loners) should be placed in
 * the highest order bits of this field.  This allows us to reduce the
 * extent of the zonelists thus saving space.  For example in the case
 * of three zone modifier bits, we could require up to eight zonelists.
 * If the leftmost zone modifier is a "loner" then the highest valid
 * zonelist would be four allowing us to allocate only five zonelists.
 * Use the first form for GFP_ZONETYPES when the leftmost bit is not
 * a "loner", otherwise use the second.
 *
 * NOTE! Make sure this matches the zones in <linux/gfp.h>
 */
#define GFP_ZONEMASK	0x07
/* #define GFP_ZONETYPES       (GFP_ZONEMASK + 1) */           /* Non-loner */
#define GFP_ZONETYPES  ((GFP_ZONEMASK + 1) / 2 + 1)            /* Loner */
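
/*
 * Worked example (illustrative): GFP_ZONEMASK == 0x07 covers three zone
 * modifier bits.  The non-loner form would need 0x07 + 1 == 8 zonelists.
 * Because the leftmost modifier (0x04, __GFP_DMA32 in <linux/gfp.h> at
 * this point) is only ever used on its own, the loner form suffices:
 *
 *	GFP_ZONETYPES == (0x07 + 1) / 2 + 1 == 5
 *
 * i.e. indices 0-3 for combinations of the two low bits plus one list
 * for the loner, matching the "five zonelists" case described above.
 */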

/*
 * On machines where it is needed (eg PCs) we divide physical memory
 * into multiple physical zones. On a 32bit PC we have 4 zones:
 *
 * ZONE_DMA	  < 16 MB	ISA DMA capable memory
 * ZONE_DMA32	     0 MB	Empty
 * ZONE_NORMAL	16-896 MB	direct mapped by the kernel
 * ZONE_HIGHMEM	 > 896 MB	only page cache and user processes
 */

struct zone {
	/* Fields commonly accessed by the page allocator */
	unsigned long		free_pages;
	unsigned long		pages_min, pages_low, pages_high;
	/*
	 * We don't know whether the memory we are going to allocate will be
	 * freeable or will eventually be released, so to avoid wasting
	 * several GB of RAM we must reserve some of the lower zone memory
	 * (otherwise we risk running OOM on the lower zones even though
	 * there is plenty of freeable RAM in the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_ratio;
	struct per_cpu_pageset	*pageset[NR_CPUS];
#else
	struct per_cpu_pageset	pageset[NR_CPUS];
#endif
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif
	struct free_area	free_area[MAX_ORDER];


	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long		nr_scan_active;
	unsigned long		nr_scan_inactive;
	unsigned long		nr_active;
	unsigned long		nr_inactive;
	unsigned long		pages_scanned;	   /* since last reclaim */
	int			all_unreclaimable; /* All pages pinned */

	/* A count of how many reclaimers are scanning this zone */
	atomic_t		reclaim_in_progress;

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * prev_priority holds the scanning priority for this zone.  It is
	 * defined as the scanning priority at which we achieved our reclaim
	 * target at the previous try_to_free_pages() or balance_pgdat()
	 * invocation.
	 *
	 * We use prev_priority as a measure of how much stress page reclaim is
	 * under - it drives the swappiness decision: whether to unmap mapped
	 * pages.
	 *
	 * temp_priority is used to remember the scanning priority at which
	 * this zone was successfully refilled to free_pages == pages_high.
	 *
	 * Access to both these fields is quite racy even on uniprocessor.  But
	 * it is expected to average out OK.
	 */
	int temp_priority;
	int prev_priority;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t	* wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * The lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	char			*name;
} ____cacheline_internodealigned_in_smp;
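
/*
 * Illustrative sketch (not the allocator's exact code): the watermark test
 * combines free_pages, the pages_min/low/high marks and lowmem_reserve[]
 * so that allocations aimed at a high zone cannot exhaust a low one;
 * roughly, for a fallback zone being considered:
 *
 *	if (zone->free_pages - (1 << order) <
 *			zone->pages_low + zone->lowmem_reserve[classzone_idx])
 *		;	/* skip this zone, try the next one in the zonelist */
 *
 * The authoritative test is zone_watermark_ok() in mm/page_alloc.c.
 */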


/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12
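
/*
 * Worked example (illustrative): reclaim starts at DEF_PRIORITY and counts
 * down towards 0 as it becomes more desperate, doubling the scan window at
 * each step: 1/4096th of the LRU at priority 12, 1/2048th at 11, and the
 * whole queue at priority 0 (queue_length >> 0).
 */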

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * Right now a zonelist takes up less than a cacheline. We never
 * modify it apart from boot-up, and only a few indices are used,
 * so despite the zonelist table being relatively big, the cache
 * footprint of this construct is very small.
 */
struct zonelist {
	struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1];	/* NULL delimited */
};
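
/*
 * Fallback walk sketch (illustrative): because the array is NULL
 * delimited, a caller can try each zone in priority order like so:
 *
 *	struct zone **z;
 *
 *	for (z = zonelist->zones; *z != NULL; z++)
 *		if (populated_zone(*z))
 *			;	/* attempt the allocation from *z */
 */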


/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to describe a higher-level memory range than
 * a single zone covers.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[GFP_ZONETYPES];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	struct page *node_mem_map;
#endif
	struct bootmem_data *bdata;
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->span_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))
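
/*
 * Usage sketch (illustrative): both forms of pgdat_page_nr() translate a
 * node-relative page index into a struct page, either by direct mem_map
 * arithmetic (flat node memmap) or via pfn_to_page():
 *
 *	pg_data_t *pgdat = NODE_DATA(nid);
 *	struct page *first = pgdat_page_nr(pgdat, 0);
 *	struct page *last  = pgdat_page_nr(pgdat,
 *					   pgdat->node_spanned_pages - 1);
 *
 * (the last page may fall in a hole on sparse layouts; check validity
 * before use)
 */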

#include <linux/memory_hotplug.h>

void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat);
void get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);

extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 2 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

static inline int is_highmem_idx(int idx)
{
	return (idx == ZONE_HIGHMEM);
}

static inline int is_normal_idx(int idx)
{
	return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
}

static inline int is_dma(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
}
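
/*
 * Illustrative note: these pointer comparisons are equivalent to checking
 * the zone's index within its node, so for any zone
 *
 *	is_highmem(zone) == is_highmem_idx(zone_idx(zone))
 *
 * holds; the pointer form merely avoids the subtraction in zone_idx().
 */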

/* These functions are used to setup the per zone pages min values and
   related sysctls */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);

#include <linux/topology.h>
/* Returns the number of the current Node. */
#ifndef numa_node_id
#define numa_node_id()		(cpu_to_node(raw_smp_processor_id()))
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map
#define MAX_NODES_SHIFT		1

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)			        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))
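
/*
 * Usage sketch (illustrative): summing free pages across every zone in
 * the system, skipping zones that have no memory:
 *
 *	struct zone *zone;
 *	unsigned long free = 0;
 *
 *	for_each_zone(zone)
 *		if (populated_zone(zone))
 *			free += zone->free_pages;
 */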

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if BITS_PER_LONG == 32
/*
 * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
 * there are 4 zones (2 bits, see ZONES_SHIFT) and this leaves 9-2=7
 * bits for nodes.
 */
#define FLAGS_RESERVED		9

#elif BITS_PER_LONG == 64
/*
 * with 64 bit flags field, there's plenty of room.
 */
#define FLAGS_RESERVED		32

#else

#error BITS_PER_LONG not defined

#endif

#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
#define early_pfn_to_nid(pfn)  (0UL)
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#ifdef CONFIG_SPARSEMEM

/*
 * SECTIONS_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif
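
/*
 * Worked example (illustrative; the actual constants come from each
 * architecture's <asm/sparsemem.h>): with SECTION_SIZE_BITS == 26,
 * MAX_PHYSMEM_BITS == 32 and PAGE_SHIFT == 12, each section covers 64MB:
 *
 *	PFN_SECTION_SHIFT == 26 - 12 == 14
 *	PAGES_PER_SECTION == 1 << 14 == 16384 pages
 *	SECTIONS_SHIFT    == 32 - 26 == 6, so NR_MEM_SECTIONS == 64
 *
 * The #error above enforces that a MAX_ORDER buddy block (here up to
 * 2^10 pages == 4MB) never straddles a section boundary.
 */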

struct page;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif
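
/*
 * Worked example (illustrative): with SPARSEMEM_EXTREME on a 64-bit
 * machine, sizeof(struct mem_section) == 8 and a 4KB PAGE_SIZE give
 * SECTIONS_PER_ROOT == 4096 / 8 == 512, so section number 1000 lives at
 *
 *	root  == SECTION_NR_TO_ROOT(1000)  == 1000 / 512 == 1
 *	index == 1000 & SECTION_ROOT_MASK  == 1000 % 512 == 488
 *
 * i.e. mem_section[1][488], and whole roots of 512 sections can stay
 * unallocated (NULL) where the address space has no memory.
 */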

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2
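
/*
 * Encoding sketch (illustrative; the authoritative code is in mm/sparse.c):
 * before the mem_map exists, memory_present() parks the early node id in
 * the same word, above the flag bits:
 *
 *	ms->section_mem_map = (nid << SECTION_NID_SHIFT) | SECTION_MARKED_PRESENT;
 *
 * Later, sparse_init_one_section() replaces the nid with the encoded
 * mem_map pointer and sets SECTION_HAS_MEM_MAP.
 */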

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int section_has_mem_map(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */