/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
 * initializer cleaner
 */
nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * Results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	i.e. (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 (about 3M) of ram reserved in ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 (7M) of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
	 256,
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32
#endif
};

EXPORT_SYMBOL(totalram_pages);

/*
 * Used by page_zone() to look up the address of the struct zone whose
 * id is encoded in the upper bits of page->flags
 */
struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);

static char *zone_names[MAX_NR_ZONES] = {
	 "DMA",
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem"
#endif
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __initdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible,
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes.
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  struct node_active_region __initdata early_node_map[MAX_ACTIVE_REGIONS];
  int __initdata nr_nodemap_entries;
  unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES];
  unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
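
/*
 * Illustrative sizing (added, not in the original source): with
 * MAX_NUMNODES of 64, the table above permits 64*50 = 3200 regions,
 * i.e. 3200 struct node_active_region entries, all __initdata and so
 * discarded after boot.
 */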

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process '%s'\n"
		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim |
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	page[1].lru.next = (void *)free_compound_page;	/* set dtor */
	page[1].lru.prev = (void *)order;
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageCompound(p);
		set_page_private(p, (unsigned long)page);
	}
}
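
/*
 * Illustrative layout (added, not in the original source): after
 * prep_compound_page(page, 2), pages page[0]..page[3] all have
 * PG_compound set and page_private() == (unsigned long)page, while
 * page[1].lru.next holds free_compound_page and page[1].lru.prev
 * holds the order, 2.
 */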

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely((unsigned long)page[1].lru.prev != order))
		bad_page(page);

	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageCompound(p) |
				(page_private(p) != (unsigned long)page)))
			bad_page(page);
		__ClearPageCompound(p);
	}
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * Functions for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these,
 * so we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	return page_private(page);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (B1) is #8, its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
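
/*
 * Worked example (added, not from the original source): for page_idx 8
 * at order 1, __page_find_buddy() yields index 8 ^ (1 << 1) = 10, and
 * __find_combined_index() yields 8 & ~(1 << 1) = 8, so the merged
 * order-2 block starts at index 8 and covers indices 8..11.
 */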

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(buddy)))
		return 0;
#endif

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PG_buddy. The page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));

	zone->free_pages += order_size;
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}
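
/*
 * Illustrative trace (added, not from the original source): freeing the
 * order-0 page at index 9 when index 8 is already a free order-0 buddy
 * merges them into an order-1 block at index 8; if index 10..11 is also
 * a free order-1 block, the loop merges again and queues an order-2
 * block at index 8 on zone->free_area[2].free_list.
 */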

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	__free_one_page(page, zone, order);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	arch_free_page(page, order);
	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE<<order);

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	kernel_map_pages(page, 1 << order, 0);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}
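
/*
 * Note (added, not in the original source): the non-zero-order path
 * above touches exactly BITS_PER_LONG pages, so it presumably relies on
 * the bootmem allocator only calling it with 1 << order == BITS_PER_LONG
 * (order 5 on 32-bit, order 6 on 64-bit).
 */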

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
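
/*
 * Illustrative walk-through (added, not from the original source): to
 * satisfy an order-0 request from an order-3 block, expand() queues the
 * upper half of the block at order 2, then the upper half of what
 * remains at order 1, then at order 0, leaving the lowest page to be
 * returned to the caller.
 */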

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refcounted(page);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area *area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		zone->free_pages -= 1UL << order;
		expand(zone, page, order, current_order, area);
		return page;
	}

	return NULL;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order);
		if (unlikely(page == NULL))
			break;
		list_add_tail(&page->lru, list);
	}
	spin_unlock(&zone->lock);
	return i;
}
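
/*
 * Usage note (added): the per-cpu list refill in buffered_rmqueue()
 * below calls rmqueue_bulk(zone, 0, pcp->batch, &pcp->list), so a whole
 * batch of order-0 pages is pulled under one zone->lock acquisition.
 */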

#ifdef CONFIG_NUMA
/*
 * Called from the slab reaper to drain pagesets on a particular node that
 * belongs to the currently executing processor.
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_node_pages(int nodeid)
{
	int i;
	enum zone_type z;
	unsigned long flags;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
		struct per_cpu_pageset *pset;

		if (!populated_zone(zone))
			continue;

		pset = zone_pcp(zone, smp_processor_id());
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			if (pcp->count) {
				local_irq_save(flags);
				free_pages_bulk(zone, pcp->count, &pcp->list, 0);
				pcp->count = 0;
				local_irq_restore(flags);
			}
		}
	}
}
#endif

#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
static void __drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			local_irq_save(flags);
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
			local_irq_restore(flags);
		}
	}
}
#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!PageNosave(page))
				ClearPageNosaveFree(page);
		}

	for (order = MAX_ORDER - 1; order >= 0; --order)
		list_for_each(curr, &zone->free_area[order].free_list) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				SetPageNosaveFree(pfn_to_page(pfn + i));
		}

	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);
}
#endif /* CONFIG_PM */

/*
 * Free a 0-order page
 */
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	arch_free_page(page, 0);

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
	local_irq_save(flags);
	__count_vm_event(PGFREE);
	list_add(&page->lru, &pcp->list);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}
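
/*
 * Illustrative numbers (added, not from the original source): with
 * pcp->high at 96 and pcp->batch at 32, the 96th freed page triggers
 * free_pages_bulk() and the oldest 32 pages (taken from the list tail)
 * drop back into the buddy lists, leaving 64 on the per-cpu list.
 */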

void fastcall free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}

void fastcall free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (= 1 << order) sub-pages: page[0] .. page[n-1].
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));
	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
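
/*
 * Usage note (added): the head page already carries a reference from
 * the original allocation, so only the tails 1..n-1 need
 * set_page_refcounted(); afterwards each of the n pages can be released
 * independently with __free_page().
 */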

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp[cold];
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count += rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
			if (unlikely(!pcp->count))
				goto failed;
		}
		page = list_entry(pcp->list.next, struct page, lru);
		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(zonelist, zone);
	local_irq_restore(flags);
	put_cpu();

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}
 | 886 |  | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 887 | #define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */ | 
| Nick Piggin | 3148890 | 2005-11-28 13:44:03 -0800 | [diff] [blame] | 888 | #define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */ | 
 | 889 | #define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */ | 
 | 890 | #define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */ | 
 | 891 | #define ALLOC_HARDER		0x10 /* try to alloc harder */ | 
 | 892 | #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */ | 
 | 893 | #define ALLOC_CPUSET		0x40 /* check for correct cpuset */ | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 894 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 895 | /* | 
 | 896 |  * Return 1 if free pages are above 'mark'. This takes into account the order | 
 | 897 |  * of the allocation. | 
 | 898 |  */ | 
 | 899 | int zone_watermark_ok(struct zone *z, int order, unsigned long mark, | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 900 | 		      int classzone_idx, int alloc_flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 901 | { | 
 | 902 | 	/* free_pages may go negative - that's OK */ | 
 | 903 | 	long min = mark, free_pages = z->free_pages - (1 << order) + 1; | 
 | 904 | 	int o; | 
 | 905 |  | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 906 | 	if (alloc_flags & ALLOC_HIGH) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 907 | 		min -= min / 2; | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 908 | 	if (alloc_flags & ALLOC_HARDER) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 909 | 		min -= min / 4; | 
 | 910 |  | 
 | 911 | 	if (free_pages <= min + z->lowmem_reserve[classzone_idx]) | 
 | 912 | 		return 0; | 
 | 913 | 	for (o = 0; o < order; o++) { | 
 | 914 | 		/* At the next order, this order's pages become unavailable */ | 
 | 915 | 		free_pages -= z->free_area[o].nr_free << o; | 
 | 916 |  | 
 | 917 | 		/* Require fewer higher order pages to be free */ | 
 | 918 | 		min >>= 1; | 
 | 919 |  | 
 | 920 | 		if (free_pages <= min) | 
 | 921 | 			return 0; | 
 | 922 | 	} | 
 | 923 | 	return 1; | 
 | 924 | } | 
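/*
 * Editorial note -- a worked example with hypothetical numbers, not taken
 * from any real system: an order-2 request against mark = 128 on a zone
 * with free_pages = 1000, lowmem_reserve = 0 and no ALLOC_HIGH/ALLOC_HARDER:
 *
 *   free_pages = 1000 - (1 << 2) + 1 = 997;  997 > 128, so proceed.
 *   o = 0: 800 free order-0 pages give free_pages = 997 - 800 = 197,
 *          min >>= 1 gives 64;  197 > 64, so proceed.
 *   o = 1: 60 free order-1 pages give free_pages = 197 - 120 = 77,
 *          min >>= 1 gives 32;  77 > 32, so return 1.
 *
 * Only blocks of order >= 2 can satisfy an order-2 request, hence the
 * lower-order free pages are subtracted before each check.
 */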
 | 925 |  | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 926 | /* | 
 | 927 |  * get_page_from_freelist goes through the zonelist trying to allocate | 
 | 928 |  * a page. | 
 | 929 |  */ | 
 | 930 | static struct page * | 
 | 931 | get_page_from_freelist(gfp_t gfp_mask, unsigned int order, | 
 | 932 | 		struct zonelist *zonelist, int alloc_flags) | 
| Martin Hicks | 753ee72 | 2005-06-21 17:14:41 -0700 | [diff] [blame] | 933 | { | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 934 | 	struct zone **z = zonelist->zones; | 
 | 935 | 	struct page *page = NULL; | 
 | 936 | 	int classzone_idx = zone_idx(*z); | 
| Christoph Lameter | 1192d52 | 2006-09-25 23:31:45 -0700 | [diff] [blame] | 937 | 	struct zone *zone; | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 938 |  | 
 | 939 | 	/* | 
 | 940 | 	 * Go through the zonelist once, looking for a zone with enough free. | 
 | 941 | 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. | 
 | 942 | 	 */ | 
 | 943 | 	do { | 
| Christoph Lameter | 1192d52 | 2006-09-25 23:31:45 -0700 | [diff] [blame] | 944 | 		zone = *z; | 
| Christoph Lameter | 08e0f6a | 2006-09-27 01:50:06 -0700 | [diff] [blame] | 945 | 		if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) && | 
| Christoph Lameter | 1192d52 | 2006-09-25 23:31:45 -0700 | [diff] [blame] | 946 | 			zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)) | 
| Christoph Lameter | 9b819d2 | 2006-09-25 23:31:40 -0700 | [diff] [blame] | 947 | 				break; | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 948 | 		if ((alloc_flags & ALLOC_CPUSET) && | 
| Christoph Lameter | 1192d52 | 2006-09-25 23:31:45 -0700 | [diff] [blame] | 949 | 				!cpuset_zone_allowed(zone, gfp_mask)) | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 950 | 			continue; | 
 | 951 |  | 
 | 952 | 		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { | 
| Nick Piggin | 3148890 | 2005-11-28 13:44:03 -0800 | [diff] [blame] | 953 | 			unsigned long mark; | 
 | 954 | 			if (alloc_flags & ALLOC_WMARK_MIN) | 
| Christoph Lameter | 1192d52 | 2006-09-25 23:31:45 -0700 | [diff] [blame] | 955 | 				mark = zone->pages_min; | 
| Nick Piggin | 3148890 | 2005-11-28 13:44:03 -0800 | [diff] [blame] | 956 | 			else if (alloc_flags & ALLOC_WMARK_LOW) | 
| Christoph Lameter | 1192d52 | 2006-09-25 23:31:45 -0700 | [diff] [blame] | 957 | 				mark = zone->pages_low; | 
| Nick Piggin | 3148890 | 2005-11-28 13:44:03 -0800 | [diff] [blame] | 958 | 			else | 
| Christoph Lameter | 1192d52 | 2006-09-25 23:31:45 -0700 | [diff] [blame] | 959 | 				mark = zone->pages_high; | 
 | 960 | 			if (!zone_watermark_ok(zone, order, mark, | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 961 | 				    classzone_idx, alloc_flags)) | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 962 | 				if (!zone_reclaim_mode || | 
| Christoph Lameter | 1192d52 | 2006-09-25 23:31:45 -0700 | [diff] [blame] | 963 | 				    !zone_reclaim(zone, gfp_mask, order)) | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 964 | 					continue; | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 965 | 		} | 
 | 966 |  | 
| Christoph Lameter | 1192d52 | 2006-09-25 23:31:45 -0700 | [diff] [blame] | 967 | 		page = buffered_rmqueue(zonelist, zone, order, gfp_mask); | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 968 | 		if (page) { | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 969 | 			break; | 
 | 970 | 		} | 
 | 971 | 	} while (*(++z) != NULL); | 
 | 972 | 	return page; | 
| Martin Hicks | 753ee72 | 2005-06-21 17:14:41 -0700 | [diff] [blame] | 973 | } | 
 | 974 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 975 | /* | 
 | 976 |  * This is the 'heart' of the zoned buddy allocator. | 
 | 977 |  */ | 
 | 978 | struct page * fastcall | 
| Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 979 | __alloc_pages(gfp_t gfp_mask, unsigned int order, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 980 | 		struct zonelist *zonelist) | 
 | 981 | { | 
| Al Viro | 260b236 | 2005-10-21 03:22:44 -0400 | [diff] [blame] | 982 | 	const gfp_t wait = gfp_mask & __GFP_WAIT; | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 983 | 	struct zone **z; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 984 | 	struct page *page; | 
 | 985 | 	struct reclaim_state reclaim_state; | 
 | 986 | 	struct task_struct *p = current; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 987 | 	int do_retry; | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 988 | 	int alloc_flags; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 989 | 	int did_some_progress; | 
 | 990 |  | 
 | 991 | 	might_sleep_if(wait); | 
 | 992 |  | 
| Jens Axboe | 6b1de91 | 2005-11-17 21:35:02 +0100 | [diff] [blame] | 993 | restart: | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 994 | 	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 995 |  | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 996 | 	if (unlikely(*z == NULL)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 997 | 		/* Should this ever happen?? */ | 
 | 998 | 		return NULL; | 
 | 999 | 	} | 
| Jens Axboe | 6b1de91 | 2005-11-17 21:35:02 +0100 | [diff] [blame] | 1000 |  | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1001 | 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, | 
| Nick Piggin | 3148890 | 2005-11-28 13:44:03 -0800 | [diff] [blame] | 1002 | 				zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET); | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1003 | 	if (page) | 
 | 1004 | 		goto got_pg; | 
 | 1005 |  | 
| Jens Axboe | 6b1de91 | 2005-11-17 21:35:02 +0100 | [diff] [blame] | 1006 | 	do { | 
| Chris Wright | 43b0bc0 | 2006-06-25 05:47:55 -0700 | [diff] [blame] | 1007 | 		wakeup_kswapd(*z, order); | 
| Jens Axboe | 6b1de91 | 2005-11-17 21:35:02 +0100 | [diff] [blame] | 1008 | 	} while (*(++z)); | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1009 |  | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 1010 | 	/* | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1011 | 	 * OK, we're below the kswapd watermark and have kicked background | 
 | 1012 | 	 * reclaim. Now things get more complex, so set up alloc_flags according | 
 | 1013 | 	 * to how we want to proceed. | 
 | 1014 | 	 * | 
 | 1015 | 	 * The caller may dip into page reserves a bit more if the caller | 
 | 1016 | 	 * cannot run direct reclaim, or if the caller has realtime scheduling | 
| Paul Jackson | 4eac915 | 2006-01-11 12:17:19 -0800 | [diff] [blame] | 1017 | 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will | 
 | 1018 | 	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 1019 | 	 */ | 
| Nick Piggin | 3148890 | 2005-11-28 13:44:03 -0800 | [diff] [blame] | 1020 | 	alloc_flags = ALLOC_WMARK_MIN; | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1021 | 	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) | 
 | 1022 | 		alloc_flags |= ALLOC_HARDER; | 
 | 1023 | 	if (gfp_mask & __GFP_HIGH) | 
 | 1024 | 		alloc_flags |= ALLOC_HIGH; | 
| Paul Jackson | bdd804f | 2006-05-20 15:00:09 -0700 | [diff] [blame] | 1025 | 	if (wait) | 
 | 1026 | 		alloc_flags |= ALLOC_CPUSET; | 
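/*
 * Editorial note: assuming GFP_ATOMIC is __GFP_HIGH with __GFP_WAIT
 * clear (as in this kernel), a GFP_ATOMIC caller leaves here with
 * ALLOC_WMARK_MIN | ALLOC_HARDER | ALLOC_HIGH, and ALLOC_CPUSET
 * unset because !wait.
 */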
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1027 |  | 
 | 1028 | 	/* | 
 | 1029 | 	 * Go through the zonelist again. Let __GFP_HIGH and allocations | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1030 | 	 * coming from realtime tasks go deeper into reserves. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1031 | 	 * | 
 | 1032 | 	 * This is the last chance, in general, before the goto nopage. | 
 | 1033 | 	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 1034 | 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1035 | 	 */ | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1036 | 	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); | 
 | 1037 | 	if (page) | 
 | 1038 | 		goto got_pg; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1039 |  | 
 | 1040 | 	/* This allocation should allow future memory freeing. */ | 
| Nick Piggin | b84a35b | 2005-05-01 08:58:36 -0700 | [diff] [blame] | 1041 |  | 
 | 1042 | 	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) | 
 | 1043 | 			&& !in_interrupt()) { | 
 | 1044 | 		if (!(gfp_mask & __GFP_NOMEMALLOC)) { | 
| Kirill Korotaev | 885036d | 2005-11-13 16:06:41 -0800 | [diff] [blame] | 1045 | nofail_alloc: | 
| Nick Piggin | b84a35b | 2005-05-01 08:58:36 -0700 | [diff] [blame] | 1046 | 			/* go through the zonelist yet again, ignoring mins */ | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1047 | 			page = get_page_from_freelist(gfp_mask, order, | 
| Paul Jackson | 47f3a86 | 2006-01-06 00:10:32 -0800 | [diff] [blame] | 1048 | 				zonelist, ALLOC_NO_WATERMARKS); | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1049 | 			if (page) | 
 | 1050 | 				goto got_pg; | 
| Kirill Korotaev | 885036d | 2005-11-13 16:06:41 -0800 | [diff] [blame] | 1051 | 			if (gfp_mask & __GFP_NOFAIL) { | 
 | 1052 | 				blk_congestion_wait(WRITE, HZ/50); | 
 | 1053 | 				goto nofail_alloc; | 
 | 1054 | 			} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1055 | 		} | 
 | 1056 | 		goto nopage; | 
 | 1057 | 	} | 
 | 1058 |  | 
 | 1059 | 	/* Atomic allocations - we can't balance anything */ | 
 | 1060 | 	if (!wait) | 
 | 1061 | 		goto nopage; | 
 | 1062 |  | 
 | 1063 | rebalance: | 
 | 1064 | 	cond_resched(); | 
 | 1065 |  | 
 | 1066 | 	/* We now go into synchronous reclaim */ | 
| Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1067 | 	cpuset_memory_pressure_bump(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1068 | 	p->flags |= PF_MEMALLOC; | 
 | 1069 | 	reclaim_state.reclaimed_slab = 0; | 
 | 1070 | 	p->reclaim_state = &reclaim_state; | 
 | 1071 |  | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1072 | 	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1073 |  | 
 | 1074 | 	p->reclaim_state = NULL; | 
 | 1075 | 	p->flags &= ~PF_MEMALLOC; | 
 | 1076 |  | 
 | 1077 | 	cond_resched(); | 
 | 1078 |  | 
 | 1079 | 	if (likely(did_some_progress)) { | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1080 | 		page = get_page_from_freelist(gfp_mask, order, | 
 | 1081 | 						zonelist, alloc_flags); | 
 | 1082 | 		if (page) | 
 | 1083 | 			goto got_pg; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1084 | 	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { | 
 | 1085 | 		/* | 
 | 1086 | 		 * Go through the zonelist yet one more time, keeping a | 
 | 1087 | 		 * very high watermark here; this is only to catch a | 
 | 1088 | 		 * parallel OOM kill, and we must fail if we're still | 
 | 1089 | 		 * under heavy pressure. | 
 | 1090 | 		 */ | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1091 | 		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, | 
| Nick Piggin | 3148890 | 2005-11-28 13:44:03 -0800 | [diff] [blame] | 1092 | 				zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1093 | 		if (page) | 
 | 1094 | 			goto got_pg; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1095 |  | 
| Christoph Lameter | 9b0f8b0 | 2006-02-20 18:27:52 -0800 | [diff] [blame] | 1096 | 		out_of_memory(zonelist, gfp_mask, order); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1097 | 		goto restart; | 
 | 1098 | 	} | 
 | 1099 |  | 
 | 1100 | 	/* | 
 | 1101 | 	 * Don't let big-order allocations loop unless the caller explicitly | 
 | 1102 | 	 * requests that.  Wait for some write requests to complete then retry. | 
 | 1103 | 	 * | 
 | 1104 | 	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order | 
 | 1105 | 	 * <= 3, but that may not be true in other implementations. | 
 | 1106 | 	 */ | 
 | 1107 | 	do_retry = 0; | 
 | 1108 | 	if (!(gfp_mask & __GFP_NORETRY)) { | 
 | 1109 | 		if ((order <= 3) || (gfp_mask & __GFP_REPEAT)) | 
 | 1110 | 			do_retry = 1; | 
 | 1111 | 		if (gfp_mask & __GFP_NOFAIL) | 
 | 1112 | 			do_retry = 1; | 
 | 1113 | 	} | 
 | 1114 | 	if (do_retry) { | 
 | 1115 | 		blk_congestion_wait(WRITE, HZ/50); | 
 | 1116 | 		goto rebalance; | 
 | 1117 | 	} | 
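/*
 * Editorial note: a request that reaches this check with order <= 3, or
 * with __GFP_REPEAT or __GFP_NOFAIL set, sleeps in blk_congestion_wait()
 * and loops back to rebalance; e.g. a plain GFP_KERNEL order-4 request
 * leaves do_retry == 0 and falls through to nopage instead.
 */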
 | 1118 |  | 
 | 1119 | nopage: | 
 | 1120 | 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { | 
 | 1121 | 		printk(KERN_WARNING "%s: page allocation failure." | 
 | 1122 | 			" order:%d, mode:0x%x\n", | 
 | 1123 | 			p->comm, order, gfp_mask); | 
 | 1124 | 		dump_stack(); | 
| Janet Morgan | 578c2fd | 2005-06-21 17:14:56 -0700 | [diff] [blame] | 1125 | 		show_mem(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1126 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 | got_pg: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1128 | 	return page; | 
 | 1129 | } | 
 | 1130 |  | 
 | 1131 | EXPORT_SYMBOL(__alloc_pages); | 
 | 1132 |  | 
 | 1133 | /* | 
 | 1134 |  * Common helper functions. | 
 | 1135 |  */ | 
| Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1136 | fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1137 | { | 
 | 1138 | 	struct page * page; | 
 | 1139 | 	page = alloc_pages(gfp_mask, order); | 
 | 1140 | 	if (!page) | 
 | 1141 | 		return 0; | 
 | 1142 | 	return (unsigned long) page_address(page); | 
 | 1143 | } | 
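/*
 * Editorial note -- a minimal usage sketch of __get_free_pages() above
 * and free_pages() (defined below); illustrative only:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 2);  // 4 pages
 *	if (buf) {
 *		// ... use the 4 contiguous, directly-mapped pages ...
 *		free_pages(buf, 2);
 *	}
 */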
 | 1144 |  | 
 | 1145 | EXPORT_SYMBOL(__get_free_pages); | 
 | 1146 |  | 
| Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1147 | fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1148 | { | 
 | 1149 | 	struct page * page; | 
 | 1150 |  | 
 | 1151 | 	/* | 
 | 1152 | 	 * get_zeroed_page() returns a directly-mapped kernel address, which | 
 | 1153 | 	 * cannot represent a highmem page | 
 | 1154 | 	 */ | 
| Nick Piggin | 725d704 | 2006-09-25 23:30:55 -0700 | [diff] [blame] | 1155 | 	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1156 |  | 
 | 1157 | 	page = alloc_pages(gfp_mask | __GFP_ZERO, 0); | 
 | 1158 | 	if (page) | 
 | 1159 | 		return (unsigned long) page_address(page); | 
 | 1160 | 	return 0; | 
 | 1161 | } | 
 | 1162 |  | 
 | 1163 | EXPORT_SYMBOL(get_zeroed_page); | 
 | 1164 |  | 
 | 1165 | void __pagevec_free(struct pagevec *pvec) | 
 | 1166 | { | 
 | 1167 | 	int i = pagevec_count(pvec); | 
 | 1168 |  | 
 | 1169 | 	while (--i >= 0) | 
 | 1170 | 		free_hot_cold_page(pvec->pages[i], pvec->cold); | 
 | 1171 | } | 
 | 1172 |  | 
 | 1173 | fastcall void __free_pages(struct page *page, unsigned int order) | 
 | 1174 | { | 
| Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 1175 | 	if (put_page_testzero(page)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1176 | 		if (order == 0) | 
 | 1177 | 			free_hot_page(page); | 
 | 1178 | 		else | 
 | 1179 | 			__free_pages_ok(page, order); | 
 | 1180 | 	} | 
 | 1181 | } | 
 | 1182 |  | 
 | 1183 | EXPORT_SYMBOL(__free_pages); | 
 | 1184 |  | 
 | 1185 | fastcall void free_pages(unsigned long addr, unsigned int order) | 
 | 1186 | { | 
 | 1187 | 	if (addr != 0) { | 
| Nick Piggin | 725d704 | 2006-09-25 23:30:55 -0700 | [diff] [blame] | 1188 | 		VM_BUG_ON(!virt_addr_valid((void *)addr)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1189 | 		__free_pages(virt_to_page((void *)addr), order); | 
 | 1190 | 	} | 
 | 1191 | } | 
 | 1192 |  | 
 | 1193 | EXPORT_SYMBOL(free_pages); | 
 | 1194 |  | 
 | 1195 | /* | 
 | 1196 |  * Total amount of free (allocatable) RAM: | 
 | 1197 |  */ | 
 | 1198 | unsigned int nr_free_pages(void) | 
 | 1199 | { | 
 | 1200 | 	unsigned int sum = 0; | 
 | 1201 | 	struct zone *zone; | 
 | 1202 |  | 
 | 1203 | 	for_each_zone(zone) | 
 | 1204 | 		sum += zone->free_pages; | 
 | 1205 |  | 
 | 1206 | 	return sum; | 
 | 1207 | } | 
 | 1208 |  | 
 | 1209 | EXPORT_SYMBOL(nr_free_pages); | 
 | 1210 |  | 
 | 1211 | #ifdef CONFIG_NUMA | 
 | 1212 | unsigned int nr_free_pages_pgdat(pg_data_t *pgdat) | 
 | 1213 | { | 
| Christoph Lameter | 2f6726e | 2006-09-25 23:31:18 -0700 | [diff] [blame] | 1214 | 	unsigned int sum = 0; | 
 | 1215 | 	enum zone_type i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1216 |  | 
 | 1217 | 	for (i = 0; i < MAX_NR_ZONES; i++) | 
 | 1218 | 		sum += pgdat->node_zones[i].free_pages; | 
 | 1219 |  | 
 | 1220 | 	return sum; | 
 | 1221 | } | 
 | 1222 | #endif | 
 | 1223 |  | 
 | 1224 | static unsigned int nr_free_zone_pages(int offset) | 
 | 1225 | { | 
| Martin J. Bligh | e310fd4 | 2005-07-29 22:59:18 -0700 | [diff] [blame] | 1226 | 	/* Just pick one node, since fallback list is circular */ | 
 | 1227 | 	pg_data_t *pgdat = NODE_DATA(numa_node_id()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1228 | 	unsigned int sum = 0; | 
 | 1229 |  | 
| Martin J. Bligh | e310fd4 | 2005-07-29 22:59:18 -0700 | [diff] [blame] | 1230 | 	struct zonelist *zonelist = pgdat->node_zonelists + offset; | 
 | 1231 | 	struct zone **zonep = zonelist->zones; | 
 | 1232 | 	struct zone *zone; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1233 |  | 
| Martin J. Bligh | e310fd4 | 2005-07-29 22:59:18 -0700 | [diff] [blame] | 1234 | 	for (zone = *zonep++; zone; zone = *zonep++) { | 
 | 1235 | 		unsigned long size = zone->present_pages; | 
 | 1236 | 		unsigned long high = zone->pages_high; | 
 | 1237 | 		if (size > high) | 
 | 1238 | 			sum += size - high; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1239 | 	} | 
 | 1240 |  | 
 | 1241 | 	return sum; | 
 | 1242 | } | 
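/*
 * Editorial note -- hypothetical numbers: a zonelist whose only populated
 * zone has present_pages = 225280 and pages_high = 960 makes this return
 * 225280 - 960 = 224320; zones already at or below pages_high contribute
 * nothing.
 */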
 | 1243 |  | 
 | 1244 | /* | 
 | 1245 |  * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL | 
 | 1246 |  */ | 
 | 1247 | unsigned int nr_free_buffer_pages(void) | 
 | 1248 | { | 
| Al Viro | af4ca45 | 2005-10-21 02:55:38 -0400 | [diff] [blame] | 1249 | 	return nr_free_zone_pages(gfp_zone(GFP_USER)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1250 | } | 
 | 1251 |  | 
 | 1252 | /* | 
 | 1253 |  * Amount of free RAM allocatable within all zones | 
 | 1254 |  */ | 
 | 1255 | unsigned int nr_free_pagecache_pages(void) | 
 | 1256 | { | 
| Al Viro | af4ca45 | 2005-10-21 02:55:38 -0400 | [diff] [blame] | 1257 | 	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1258 | } | 
| Christoph Lameter | 08e0f6a | 2006-09-27 01:50:06 -0700 | [diff] [blame] | 1259 |  | 
 | 1260 | static inline void show_node(struct zone *zone) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1261 | { | 
| Christoph Lameter | 08e0f6a | 2006-09-27 01:50:06 -0700 | [diff] [blame] | 1262 | 	if (NUMA_BUILD) | 
 | 1263 | 		printk("Node %d ", zone_to_nid(zone)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1264 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1265 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1266 | void si_meminfo(struct sysinfo *val) | 
 | 1267 | { | 
 | 1268 | 	val->totalram = totalram_pages; | 
 | 1269 | 	val->sharedram = 0; | 
 | 1270 | 	val->freeram = nr_free_pages(); | 
 | 1271 | 	val->bufferram = nr_blockdev_pages(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1272 | 	val->totalhigh = totalhigh_pages; | 
 | 1273 | 	val->freehigh = nr_free_highpages(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1274 | 	val->mem_unit = PAGE_SIZE; | 
 | 1275 | } | 
 | 1276 |  | 
 | 1277 | EXPORT_SYMBOL(si_meminfo); | 
 | 1278 |  | 
 | 1279 | #ifdef CONFIG_NUMA | 
 | 1280 | void si_meminfo_node(struct sysinfo *val, int nid) | 
 | 1281 | { | 
 | 1282 | 	pg_data_t *pgdat = NODE_DATA(nid); | 
 | 1283 |  | 
 | 1284 | 	val->totalram = pgdat->node_present_pages; | 
 | 1285 | 	val->freeram = nr_free_pages_pgdat(pgdat); | 
| Christoph Lameter | 98d2b0e | 2006-09-25 23:31:12 -0700 | [diff] [blame] | 1286 | #ifdef CONFIG_HIGHMEM | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1287 | 	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; | 
 | 1288 | 	val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages; | 
| Christoph Lameter | 98d2b0e | 2006-09-25 23:31:12 -0700 | [diff] [blame] | 1289 | #else | 
 | 1290 | 	val->totalhigh = 0; | 
 | 1291 | 	val->freehigh = 0; | 
 | 1292 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1293 | 	val->mem_unit = PAGE_SIZE; | 
 | 1294 | } | 
 | 1295 | #endif | 
 | 1296 |  | 
 | 1297 | #define K(x) ((x) << (PAGE_SHIFT-10)) | 
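/*
 * Editorial note: with 4K pages (PAGE_SHIFT == 12), K(x) is x << 2,
 * e.g. K(256) pages prints as 1024 kB.
 */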
 | 1298 |  | 
 | 1299 | /* | 
 | 1300 |  * Show free area list (used by the SysRq show-memory key, historically | 
 | 1301 |  * Shift+Scroll Lock).  We also dump the per-order free-list counts for | 
 | 1302 |  * each zone, which shows how fragmented the remaining free memory is. | 
 | 1303 |  */ | 
 | 1304 | void show_free_areas(void) | 
 | 1305 | { | 
| Jes Sorensen | c724191 | 2006-09-27 01:50:05 -0700 | [diff] [blame] | 1306 | 	int cpu; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1307 | 	unsigned long active; | 
 | 1308 | 	unsigned long inactive; | 
 | 1309 | 	unsigned long free; | 
 | 1310 | 	struct zone *zone; | 
 | 1311 |  | 
 | 1312 | 	for_each_zone(zone) { | 
| Jes Sorensen | c724191 | 2006-09-27 01:50:05 -0700 | [diff] [blame] | 1313 | 		if (!populated_zone(zone)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1314 | 			continue; | 
| Jes Sorensen | c724191 | 2006-09-27 01:50:05 -0700 | [diff] [blame] | 1315 |  | 
 | 1316 | 		show_node(zone); | 
 | 1317 | 		printk("%s per-cpu:\n", zone->name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1318 |  | 
| Dave Jones | 6b482c6 | 2005-11-10 15:45:56 -0500 | [diff] [blame] | 1319 | 		for_each_online_cpu(cpu) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1320 | 			struct per_cpu_pageset *pageset; | 
 | 1321 |  | 
| Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1322 | 			pageset = zone_pcp(zone, cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1323 |  | 
| Jes Sorensen | c724191 | 2006-09-27 01:50:05 -0700 | [diff] [blame] | 1324 | 			printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d   " | 
 | 1325 | 			       "Cold: hi:%5d, btch:%4d usd:%4d\n", | 
 | 1326 | 			       cpu, pageset->pcp[0].high, | 
 | 1327 | 			       pageset->pcp[0].batch, pageset->pcp[0].count, | 
 | 1328 | 			       pageset->pcp[1].high, pageset->pcp[1].batch, | 
 | 1329 | 			       pageset->pcp[1].count); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1330 | 		} | 
 | 1331 | 	} | 
 | 1332 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1333 | 	get_zone_counts(&active, &inactive, &free); | 
 | 1334 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1335 | 	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu " | 
 | 1336 | 		"unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n", | 
 | 1337 | 		active, | 
 | 1338 | 		inactive, | 
| Christoph Lameter | b1e7a8f | 2006-06-30 01:55:39 -0700 | [diff] [blame] | 1339 | 		global_page_state(NR_FILE_DIRTY), | 
| Christoph Lameter | ce866b3 | 2006-06-30 01:55:40 -0700 | [diff] [blame] | 1340 | 		global_page_state(NR_WRITEBACK), | 
| Christoph Lameter | fd39fc8 | 2006-06-30 01:55:40 -0700 | [diff] [blame] | 1341 | 		global_page_state(NR_UNSTABLE_NFS), | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1342 | 		nr_free_pages(), | 
| Christoph Lameter | 972d1a7 | 2006-09-25 23:31:51 -0700 | [diff] [blame] | 1343 | 		global_page_state(NR_SLAB_RECLAIMABLE) + | 
 | 1344 | 			global_page_state(NR_SLAB_UNRECLAIMABLE), | 
| Christoph Lameter | 65ba55f | 2006-06-30 01:55:34 -0700 | [diff] [blame] | 1345 | 		global_page_state(NR_FILE_MAPPED), | 
| Christoph Lameter | df849a1 | 2006-06-30 01:55:38 -0700 | [diff] [blame] | 1346 | 		global_page_state(NR_PAGETABLE)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1347 |  | 
 | 1348 | 	for_each_zone(zone) { | 
 | 1349 | 		int i; | 
 | 1350 |  | 
| Jes Sorensen | c724191 | 2006-09-27 01:50:05 -0700 | [diff] [blame] | 1351 | 		if (!populated_zone(zone)) | 
 | 1352 | 			continue; | 
 | 1353 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1354 | 		show_node(zone); | 
 | 1355 | 		printk("%s" | 
 | 1356 | 			" free:%lukB" | 
 | 1357 | 			" min:%lukB" | 
 | 1358 | 			" low:%lukB" | 
 | 1359 | 			" high:%lukB" | 
 | 1360 | 			" active:%lukB" | 
 | 1361 | 			" inactive:%lukB" | 
 | 1362 | 			" present:%lukB" | 
 | 1363 | 			" pages_scanned:%lu" | 
 | 1364 | 			" all_unreclaimable? %s" | 
 | 1365 | 			"\n", | 
 | 1366 | 			zone->name, | 
 | 1367 | 			K(zone->free_pages), | 
 | 1368 | 			K(zone->pages_min), | 
 | 1369 | 			K(zone->pages_low), | 
 | 1370 | 			K(zone->pages_high), | 
 | 1371 | 			K(zone->nr_active), | 
 | 1372 | 			K(zone->nr_inactive), | 
 | 1373 | 			K(zone->present_pages), | 
 | 1374 | 			zone->pages_scanned, | 
 | 1375 | 			(zone->all_unreclaimable ? "yes" : "no") | 
 | 1376 | 			); | 
 | 1377 | 		printk("lowmem_reserve[]:"); | 
 | 1378 | 		for (i = 0; i < MAX_NR_ZONES; i++) | 
 | 1379 | 			printk(" %lu", zone->lowmem_reserve[i]); | 
 | 1380 | 		printk("\n"); | 
 | 1381 | 	} | 
 | 1382 |  | 
 | 1383 | 	for_each_zone(zone) { | 
| Kirill Korotaev | 8f9de51 | 2006-06-23 02:03:50 -0700 | [diff] [blame] | 1384 |  		unsigned long nr[MAX_ORDER], flags, order, total = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1385 |  | 
| Jes Sorensen | c724191 | 2006-09-27 01:50:05 -0700 | [diff] [blame] | 1386 | 		if (!populated_zone(zone)) | 
 | 1387 | 			continue; | 
 | 1388 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1389 | 		show_node(zone); | 
 | 1390 | 		printk("%s: ", zone->name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1391 |  | 
 | 1392 | 		spin_lock_irqsave(&zone->lock, flags); | 
 | 1393 | 		for (order = 0; order < MAX_ORDER; order++) { | 
| Kirill Korotaev | 8f9de51 | 2006-06-23 02:03:50 -0700 | [diff] [blame] | 1394 | 			nr[order] = zone->free_area[order].nr_free; | 
 | 1395 | 			total += nr[order] << order; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1396 | 		} | 
 | 1397 | 		spin_unlock_irqrestore(&zone->lock, flags); | 
| Kirill Korotaev | 8f9de51 | 2006-06-23 02:03:50 -0700 | [diff] [blame] | 1398 | 		for (order = 0; order < MAX_ORDER; order++) | 
 | 1399 | 			printk("%lu*%lukB ", nr[order], K(1UL) << order); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1400 | 		printk("= %lukB\n", K(total)); | 
 | 1401 | 	} | 
 | 1402 |  | 
 | 1403 | 	show_swap_cache_info(); | 
 | 1404 | } | 
 | 1405 |  | 
 | 1406 | /* | 
 | 1407 |  * Builds allocation fallback zone lists. | 
| Christoph Lameter | 1a93205 | 2006-01-06 00:11:16 -0800 | [diff] [blame] | 1408 |  * | 
 | 1409 |  * Add all populated zones of a node to the zonelist. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1410 |  */ | 
| Yasunori Goto | 86356ab | 2006-06-23 02:03:09 -0700 | [diff] [blame] | 1411 | static int __meminit build_zonelists_node(pg_data_t *pgdat, | 
| Christoph Lameter | 2f6726e | 2006-09-25 23:31:18 -0700 | [diff] [blame] | 1412 | 			struct zonelist *zonelist, int nr_zones, enum zone_type zone_type) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1413 | { | 
| Christoph Lameter | 1a93205 | 2006-01-06 00:11:16 -0800 | [diff] [blame] | 1414 | 	struct zone *zone; | 
 | 1415 |  | 
| Christoph Lameter | 98d2b0e | 2006-09-25 23:31:12 -0700 | [diff] [blame] | 1416 | 	BUG_ON(zone_type >= MAX_NR_ZONES); | 
| Christoph Lameter | 2f6726e | 2006-09-25 23:31:18 -0700 | [diff] [blame] | 1417 | 	zone_type++; | 
| Christoph Lameter | 02a68a5 | 2006-01-06 00:11:18 -0800 | [diff] [blame] | 1418 |  | 
 | 1419 | 	do { | 
| Christoph Lameter | 2f6726e | 2006-09-25 23:31:18 -0700 | [diff] [blame] | 1420 | 		zone_type--; | 
| Christoph Lameter | 070f803 | 2006-01-06 00:11:19 -0800 | [diff] [blame] | 1421 | 		zone = pgdat->node_zones + zone_type; | 
| Christoph Lameter | 1a93205 | 2006-01-06 00:11:16 -0800 | [diff] [blame] | 1422 | 		if (populated_zone(zone)) { | 
| Christoph Lameter | 070f803 | 2006-01-06 00:11:19 -0800 | [diff] [blame] | 1423 | 			zonelist->zones[nr_zones++] = zone; | 
 | 1424 | 			check_highest_zone(zone_type); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1425 | 		} | 
| Christoph Lameter | 02a68a5 | 2006-01-06 00:11:18 -0800 | [diff] [blame] | 1426 |  | 
| Christoph Lameter | 2f6726e | 2006-09-25 23:31:18 -0700 | [diff] [blame] | 1427 | 	} while (zone_type); | 
| Christoph Lameter | 070f803 | 2006-01-06 00:11:19 -0800 | [diff] [blame] | 1428 | 	return nr_zones; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1429 | } | 
 | 1430 |  | 
 | 1431 | #ifdef CONFIG_NUMA | 
 | 1432 | #define MAX_NODE_LOAD (num_online_nodes()) | 
| Yasunori Goto | 86356ab | 2006-06-23 02:03:09 -0700 | [diff] [blame] | 1433 | static int __meminitdata node_load[MAX_NUMNODES]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1434 | /** | 
| Pavel Pisa | 4dc3b16 | 2005-05-01 08:59:25 -0700 | [diff] [blame] | 1435 |  * find_next_best_node - find the next node that should appear in a given node's fallback list | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1436 |  * @node: node whose fallback list we're appending | 
 | 1437 |  * @used_node_mask: nodemask_t of already used nodes | 
 | 1438 |  * | 
 | 1439 |  * We use a number of factors to determine which is the next node that should | 
 | 1440 |  * appear on a given node's fallback list.  The node should not have appeared | 
 | 1441 |  * already in @node's fallback list, and it should be the next closest node | 
 | 1442 |  * according to the distance array (which contains arbitrary distance values | 
 | 1443 |  * from each node to each node in the system); we also prefer nodes with | 
 | 1444 |  * no CPUs, since presumably they'll have very little allocation pressure | 
 | 1445 |  * on them otherwise. | 
 | 1446 |  * It returns -1 if no node is found. | 
 | 1447 |  */ | 
| Yasunori Goto | 86356ab | 2006-06-23 02:03:09 -0700 | [diff] [blame] | 1448 | static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1449 | { | 
| Linus Torvalds | 4cf808e | 2006-02-17 20:38:21 +0100 | [diff] [blame] | 1450 | 	int n, val; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1451 | 	int min_val = INT_MAX; | 
 | 1452 | 	int best_node = -1; | 
 | 1453 |  | 
| Linus Torvalds | 4cf808e | 2006-02-17 20:38:21 +0100 | [diff] [blame] | 1454 | 	/* Use the local node if we haven't already */ | 
 | 1455 | 	if (!node_isset(node, *used_node_mask)) { | 
 | 1456 | 		node_set(node, *used_node_mask); | 
 | 1457 | 		return node; | 
 | 1458 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1459 |  | 
| Linus Torvalds | 4cf808e | 2006-02-17 20:38:21 +0100 | [diff] [blame] | 1460 | 	for_each_online_node(n) { | 
 | 1461 | 		cpumask_t tmp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1462 |  | 
 | 1463 | 		/* Don't want a node to appear more than once */ | 
 | 1464 | 		if (node_isset(n, *used_node_mask)) | 
 | 1465 | 			continue; | 
 | 1466 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1467 | 		/* Use the distance array to find the distance */ | 
 | 1468 | 		val = node_distance(node, n); | 
 | 1469 |  | 
| Linus Torvalds | 4cf808e | 2006-02-17 20:38:21 +0100 | [diff] [blame] | 1470 | 		/* Penalize nodes under us ("prefer the next node") */ | 
 | 1471 | 		val += (n < node); | 
 | 1472 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1473 | 		/* Give preference to headless and unused nodes */ | 
 | 1474 | 		tmp = node_to_cpumask(n); | 
 | 1475 | 		if (!cpus_empty(tmp)) | 
 | 1476 | 			val += PENALTY_FOR_NODE_WITH_CPUS; | 
 | 1477 |  | 
 | 1478 | 		/* Slight preference for less loaded node */ | 
 | 1479 | 		val *= (MAX_NODE_LOAD*MAX_NUMNODES); | 
 | 1480 | 		val += node_load[n]; | 
 | 1481 |  | 
 | 1482 | 		if (val < min_val) { | 
 | 1483 | 			min_val = val; | 
 | 1484 | 			best_node = n; | 
 | 1485 | 		} | 
 | 1486 | 	} | 
 | 1487 |  | 
 | 1488 | 	if (best_node >= 0) | 
 | 1489 | 		node_set(best_node, *used_node_mask); | 
 | 1490 |  | 
 | 1491 | 	return best_node; | 
 | 1492 | } | 
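/*
 * Editorial note -- a hypothetical topology: suppose node 0 must choose
 * between n = 1 (has CPUs) and n = 2 (headless), both at distance 20
 * with node_load == 0.  Node 1 scores
 * (20 + PENALTY_FOR_NODE_WITH_CPUS) * MAX_NODE_LOAD * MAX_NUMNODES,
 * node 2 scores 20 * MAX_NODE_LOAD * MAX_NUMNODES, so the headless
 * node has the lower val and is picked first.
 */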
 | 1493 |  | 
| Yasunori Goto | 86356ab | 2006-06-23 02:03:09 -0700 | [diff] [blame] | 1494 | static void __meminit build_zonelists(pg_data_t *pgdat) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1495 | { | 
| Christoph Lameter | 19655d3 | 2006-09-25 23:31:19 -0700 | [diff] [blame] | 1496 | 	int j, node, local_node; | 
 | 1497 | 	enum zone_type i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1498 | 	int prev_node, load; | 
 | 1499 | 	struct zonelist *zonelist; | 
 | 1500 | 	nodemask_t used_mask; | 
 | 1501 |  | 
 | 1502 | 	/* initialize zonelists */ | 
| Christoph Lameter | 19655d3 | 2006-09-25 23:31:19 -0700 | [diff] [blame] | 1503 | 	for (i = 0; i < MAX_NR_ZONES; i++) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1504 | 		zonelist = pgdat->node_zonelists + i; | 
 | 1505 | 		zonelist->zones[0] = NULL; | 
 | 1506 | 	} | 
 | 1507 |  | 
 | 1508 | 	/* NUMA-aware ordering of nodes */ | 
 | 1509 | 	local_node = pgdat->node_id; | 
 | 1510 | 	load = num_online_nodes(); | 
 | 1511 | 	prev_node = local_node; | 
 | 1512 | 	nodes_clear(used_mask); | 
 | 1513 | 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1514 | 		int distance = node_distance(local_node, node); | 
 | 1515 |  | 
 | 1516 | 		/* | 
 | 1517 | 		 * If another node is sufficiently far away then it is better | 
 | 1518 | 		 * to reclaim pages in a zone before going off node. | 
 | 1519 | 		 */ | 
 | 1520 | 		if (distance > RECLAIM_DISTANCE) | 
 | 1521 | 			zone_reclaim_mode = 1; | 
 | 1522 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1523 | 		/* | 
 | 1524 | 		 * We don't want to pressure a particular node. | 
 | 1525 | 		 * So we add a penalty to the first node in the same | 
 | 1526 | 		 * distance group to make it round-robin. | 
 | 1527 | 		 */ | 
| Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1528 |  | 
 | 1529 | 		if (distance != node_distance(local_node, prev_node)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1530 | 			node_load[node] += load; | 
 | 1531 | 		prev_node = node; | 
 | 1532 | 		load--; | 
| Christoph Lameter | 19655d3 | 2006-09-25 23:31:19 -0700 | [diff] [blame] | 1533 | 		for (i = 0; i < MAX_NR_ZONES; i++) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1534 | 			zonelist = pgdat->node_zonelists + i; | 
 | 1535 | 			for (j = 0; zonelist->zones[j] != NULL; j++); | 
 | 1536 |  | 
| Christoph Lameter | 19655d3 | 2006-09-25 23:31:19 -0700 | [diff] [blame] | 1537 | 	 		j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1538 | 			zonelist->zones[j] = NULL; | 
 | 1539 | 		} | 
 | 1540 | 	} | 
 | 1541 | } | 
 | 1542 |  | 
 | 1543 | #else	/* CONFIG_NUMA */ | 
 | 1544 |  | 
| Yasunori Goto | 86356ab | 2006-06-23 02:03:09 -0700 | [diff] [blame] | 1545 | static void __meminit build_zonelists(pg_data_t *pgdat) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1546 | { | 
| Christoph Lameter | 19655d3 | 2006-09-25 23:31:19 -0700 | [diff] [blame] | 1547 | 	int node, local_node; | 
 | 1548 | 	enum zone_type i, j; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1549 |  | 
 | 1550 | 	local_node = pgdat->node_id; | 
| Christoph Lameter | 19655d3 | 2006-09-25 23:31:19 -0700 | [diff] [blame] | 1551 | 	for (i = 0; i < MAX_NR_ZONES; i++) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1552 | 		struct zonelist *zonelist; | 
 | 1553 |  | 
 | 1554 | 		zonelist = pgdat->node_zonelists + i; | 
 | 1555 |  | 
| Christoph Lameter | 19655d3 | 2006-09-25 23:31:19 -0700 | [diff] [blame] | 1556 |  		j = build_zonelists_node(pgdat, zonelist, 0, i); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1557 |  		/* | 
 | 1558 |  		 * Now we build the zonelist so that it contains the zones | 
 | 1559 |  		 * of all the other nodes. | 
 | 1560 |  		 * We don't want to pressure a particular node, so when | 
 | 1561 |  		 * building the zones for node N, we make sure that the | 
 | 1562 |  		 * zones coming right after the local ones are those from | 
 | 1563 |  		 * node N+1 (modulo N) | 
 | 1564 |  		 */ | 
 | 1565 | 		for (node = local_node + 1; node < MAX_NUMNODES; node++) { | 
 | 1566 | 			if (!node_online(node)) | 
 | 1567 | 				continue; | 
| Christoph Lameter | 19655d3 | 2006-09-25 23:31:19 -0700 | [diff] [blame] | 1568 | 			j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1569 | 		} | 
 | 1570 | 		for (node = 0; node < local_node; node++) { | 
 | 1571 | 			if (!node_online(node)) | 
 | 1572 | 				continue; | 
| Christoph Lameter | 19655d3 | 2006-09-25 23:31:19 -0700 | [diff] [blame] | 1573 | 			j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1574 | 		} | 
 | 1575 |  | 
 | 1576 | 		zonelist->zones[j] = NULL; | 
 | 1577 | 	} | 
 | 1578 | } | 
 | 1579 |  | 
 | 1580 | #endif	/* CONFIG_NUMA */ | 
 | 1581 |  | 
| Yasunori Goto | 6811378 | 2006-06-23 02:03:11 -0700 | [diff] [blame] | 1582 | /* the int return value exists just for stop_machine_run() */ | 
 | 1583 | static int __meminit __build_all_zonelists(void *dummy) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1584 | { | 
| Yasunori Goto | 6811378 | 2006-06-23 02:03:11 -0700 | [diff] [blame] | 1585 | 	int nid; | 
 | 1586 | 	for_each_online_node(nid) | 
 | 1587 | 		build_zonelists(NODE_DATA(nid)); | 
 | 1588 | 	return 0; | 
 | 1589 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1590 |  | 
| Yasunori Goto | 6811378 | 2006-06-23 02:03:11 -0700 | [diff] [blame] | 1591 | void __meminit build_all_zonelists(void) | 
 | 1592 | { | 
 | 1593 | 	if (system_state == SYSTEM_BOOTING) { | 
| Randy Dunlap | 423b41d | 2006-09-27 01:50:12 -0700 | [diff] [blame] | 1594 | 		__build_all_zonelists(NULL); | 
| Yasunori Goto | 6811378 | 2006-06-23 02:03:11 -0700 | [diff] [blame] | 1595 | 		cpuset_init_current_mems_allowed(); | 
 | 1596 | 	} else { | 
 | 1597 | 		/* we have to stop all cpus to guarantee there is no user | 
 | 1598 | 		   of zonelist */ | 
 | 1599 | 		stop_machine_run(__build_all_zonelists, NULL, NR_CPUS); | 
 | 1600 | 		/* cpuset refresh routine should be here */ | 
 | 1601 | 	} | 
| Andrew Morton | bd1e22b | 2006-06-23 02:03:47 -0700 | [diff] [blame] | 1602 | 	vm_total_pages = nr_free_pagecache_pages(); | 
 | 1603 | 	printk("Built %i zonelists.  Total pages: %ld\n", | 
 | 1604 | 			num_online_nodes(), vm_total_pages); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1605 | } | 
 | 1606 |  | 
 | 1607 | /* | 
 | 1608 |  * Helper functions to size the waitqueue hash table. | 
 | 1609 |  * Essentially these want to choose hash table sizes sufficiently | 
 | 1610 |  * large so that collisions trying to wait on pages are rare. | 
 | 1611 |  * But in fact, the number of active page waitqueues on typical | 
 | 1612 |  * systems is ridiculously low, less than 200. So even this is | 
 | 1613 |  * conservative, though it may seem large. | 
 | 1614 |  * | 
 | 1615 |  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to | 
 | 1616 |  * waitqueues, i.e. the size of the waitq table given the number of pages. | 
 | 1617 |  */ | 
 | 1618 | #define PAGES_PER_WAITQUEUE	256 | 
 | 1619 |  | 
| Yasunori Goto | cca448f | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 1620 | #ifndef CONFIG_MEMORY_HOTPLUG | 
| Yasunori Goto | 02b694d | 2006-06-23 02:03:08 -0700 | [diff] [blame] | 1621 | static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1622 | { | 
 | 1623 | 	unsigned long size = 1; | 
 | 1624 |  | 
 | 1625 | 	pages /= PAGES_PER_WAITQUEUE; | 
 | 1626 |  | 
 | 1627 | 	while (size < pages) | 
 | 1628 | 		size <<= 1; | 
 | 1629 |  | 
 | 1630 | 	/* | 
 | 1631 | 	 * Once we have dozens or even hundreds of threads sleeping | 
 | 1632 | 	 * on IO we've got bigger problems than wait queue collision. | 
 | 1633 | 	 * Limit the size of the wait table to a reasonable size. | 
 | 1634 | 	 */ | 
 | 1635 | 	size = min(size, 4096UL); | 
 | 1636 |  | 
 | 1637 | 	return max(size, 4UL); | 
 | 1638 | } | 
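/*
 * Editorial note -- worked example: a 512MB zone of 4K pages has
 * 131072 pages, and 131072 / 256 = 512; size doubles from 1 up to 512,
 * which already lies within [4, 4096], so the table gets 512 entries.
 */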
| Yasunori Goto | cca448f | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 1639 | #else | 
 | 1640 | /* | 
 | 1641 |  * A zone's size might be changed by hot-add, so it is not possible to determine | 
 | 1642 |  * a suitable size for its wait_table.  So we use the maximum size now. | 
 | 1643 |  * | 
 | 1644 |  * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie: | 
 | 1645 |  * | 
 | 1646 |  *    i386 (preemption config)    : 4096 x 16 = 64Kbyte. | 
 | 1647 |  *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. | 
 | 1648 |  *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte. | 
 | 1649 |  * | 
 | 1650 |  * The maximum number of entries is reached once a zone has (512K + 256) | 
 | 1651 |  * pages or more, computed the traditional way (see above).  It equals: | 
 | 1652 |  * | 
 | 1653 |  *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte. | 
 | 1654 |  *    ia64(16K page size)                 : =  ( 8G + 4M)byte. | 
 | 1655 |  *    powerpc (64K page size)             : =  (32G +16M)byte. | 
 | 1656 |  */ | 
 | 1657 | static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) | 
 | 1658 | { | 
 | 1659 | 	return 4096UL; | 
 | 1660 | } | 
 | 1661 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1662 |  | 
 | 1663 | /* | 
 | 1664 |  * This is an integer logarithm so that shifts can be used later | 
 | 1665 |  * to extract the more random high bits from the multiplicative | 
 | 1666 |  * hash function before the remainder is taken. | 
 | 1667 |  */ | 
 | 1668 | static inline unsigned long wait_table_bits(unsigned long size) | 
 | 1669 | { | 
 | 1670 | 	return ffz(~size); | 
 | 1671 | } | 
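/*
 * Editorial note: the table size is always a power of two here, so
 * ffz(~size) is simply log2(size): ~4096 has its lowest zero bit at
 * bit 12, so wait_table_bits(4096) == 12.
 */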
 | 1672 |  | 
 | 1673 | #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) | 
 | 1674 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1675 | /* | 
 | 1676 |  * Initially all pages are reserved - free ones are freed | 
 | 1677 |  * up by free_all_bootmem() once the early boot process is | 
 | 1678 |  * done. Non-atomic initialization, single-pass. | 
 | 1679 |  */ | 
| Matt Tolentino | c09b424 | 2006-01-17 07:03:44 +0100 | [diff] [blame] | 1680 | void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1681 | 		unsigned long start_pfn) | 
 | 1682 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1683 | 	struct page *page; | 
| Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1684 | 	unsigned long end_pfn = start_pfn + size; | 
 | 1685 | 	unsigned long pfn; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1686 |  | 
| Greg Ungerer | cbe8dd4 | 2006-01-12 01:05:24 -0800 | [diff] [blame] | 1687 | 	for (pfn = start_pfn; pfn < end_pfn; pfn++) { | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1688 | 		if (!early_pfn_valid(pfn)) | 
 | 1689 | 			continue; | 
 | 1690 | 		page = pfn_to_page(pfn); | 
 | 1691 | 		set_page_links(page, zone, nid, pfn); | 
| Nick Piggin | 7835e98 | 2006-03-22 00:08:40 -0800 | [diff] [blame] | 1692 | 		init_page_count(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1693 | 		reset_page_mapcount(page); | 
 | 1694 | 		SetPageReserved(page); | 
 | 1695 | 		INIT_LIST_HEAD(&page->lru); | 
 | 1696 | #ifdef WANT_PAGE_VIRTUAL | 
 | 1697 | 		/* The shift won't overflow because ZONE_NORMAL is below 4G. */ | 
 | 1698 | 		if (!is_highmem_idx(zone)) | 
| Bob Picco | 3212c6b | 2005-06-27 14:36:28 -0700 | [diff] [blame] | 1699 | 			set_page_address(page, __va(pfn << PAGE_SHIFT)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1700 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1701 | 	} | 
 | 1702 | } | 
 | 1703 |  | 
 | 1704 | void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, | 
 | 1705 | 				unsigned long size) | 
 | 1706 | { | 
 | 1707 | 	int order; | 
 | 1708 | 	for (order = 0; order < MAX_ORDER ; order++) { | 
 | 1709 | 		INIT_LIST_HEAD(&zone->free_area[order].free_list); | 
 | 1710 | 		zone->free_area[order].nr_free = 0; | 
 | 1711 | 	} | 
 | 1712 | } | 
 | 1713 |  | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1714 | #define ZONETABLE_INDEX(x, zone_nr)	((x << ZONES_SHIFT) | zone_nr) | 
| Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 1715 | void zonetable_add(struct zone *zone, int nid, enum zone_type zid, | 
 | 1716 | 		unsigned long pfn, unsigned long size) | 
| Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1717 | { | 
 | 1718 | 	unsigned long snum = pfn_to_section_nr(pfn); | 
 | 1719 | 	unsigned long end = pfn_to_section_nr(pfn + size); | 
 | 1720 |  | 
 | 1721 | 	if (FLAGS_HAS_NODE) | 
 | 1722 | 		zone_table[ZONETABLE_INDEX(nid, zid)] = zone; | 
 | 1723 | 	else | 
 | 1724 | 		for (; snum <= end; snum++) | 
 | 1725 | 			zone_table[ZONETABLE_INDEX(snum, zid)] = zone; | 
 | 1726 | } | 
 | 1727 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1728 | #ifndef __HAVE_ARCH_MEMMAP_INIT | 
 | 1729 | #define memmap_init(size, nid, zone, start_pfn) \ | 
 | 1730 | 	memmap_init_zone((size), (nid), (zone), (start_pfn)) | 
 | 1731 | #endif | 
 | 1732 |  | 
| Ashok Raj | 6292d9a | 2006-02-01 03:04:44 -0800 | [diff] [blame] | 1733 | static int __cpuinit zone_batchsize(struct zone *zone) | 
| Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1734 | { | 
 | 1735 | 	int batch; | 
 | 1736 |  | 
 | 1737 | 	/* | 
 | 1738 | 	 * The per-cpu-pages pools are set to around 1/1000th of the | 
| Seth, Rohit | ba56e91 | 2005-10-29 18:15:47 -0700 | [diff] [blame] | 1739 | 	 * size of the zone.  But no more than 1/2 of a meg. | 
| Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1740 | 	 * | 
 | 1741 | 	 * OK, so we don't know how big the cache is.  So guess. | 
 | 1742 | 	 */ | 
 | 1743 | 	batch = zone->present_pages / 1024; | 
| Seth, Rohit | ba56e91 | 2005-10-29 18:15:47 -0700 | [diff] [blame] | 1744 | 	if (batch * PAGE_SIZE > 512 * 1024) | 
 | 1745 | 		batch = (512 * 1024) / PAGE_SIZE; | 
| Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1746 | 	batch /= 4;		/* We effectively *= 4 below */ | 
 | 1747 | 	if (batch < 1) | 
 | 1748 | 		batch = 1; | 
 | 1749 |  | 
 | 1750 | 	/* | 
| Nick Piggin | 0ceaacc | 2005-12-04 13:55:25 +1100 | [diff] [blame] | 1751 | 	 * Clamp the batch to a 2^n - 1 value. Having a power | 
 | 1752 | 	 * of 2 value was found to be more likely to have | 
 | 1753 | 	 * suboptimal cache aliasing properties in some cases. | 
| Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1754 | 	 * | 
| Nick Piggin | 0ceaacc | 2005-12-04 13:55:25 +1100 | [diff] [blame] | 1755 | 	 * For example if 2 tasks are alternately allocating | 
 | 1756 | 	 * batches of pages, one task can end up with a lot | 
 | 1757 | 	 * of pages of one half of the possible page colors | 
 | 1758 | 	 * and the other with pages of the other colors. | 
| Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1759 | 	 */ | 
| Nick Piggin | 0ceaacc | 2005-12-04 13:55:25 +1100 | [diff] [blame] | 1760 | 	batch = (1 << (fls(batch + batch/2)-1)) - 1; | 
| Seth, Rohit | ba56e91 | 2005-10-29 18:15:47 -0700 | [diff] [blame] | 1761 |  | 
| Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1762 | 	return batch; | 
 | 1763 | } | 
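/*
 * Editorial note -- worked example, assuming 4K pages, for a 1GB zone
 * (present_pages = 262144):
 *
 *   batch = 262144 / 1024 = 256;
 *   256 * 4096 bytes = 1MB > 512KB, so batch = (512 * 1024) / 4096 = 128;
 *   batch /= 4 gives 32;
 *   fls(32 + 16) = 6, so batch = (1 << 5) - 1 = 31.
 */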
 | 1764 |  | 
| Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 1765 | inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) | 
 | 1766 | { | 
 | 1767 | 	struct per_cpu_pages *pcp; | 
 | 1768 |  | 
| Magnus Damm | 1c6fe94 | 2005-10-26 01:58:59 -0700 | [diff] [blame] | 1769 | 	memset(p, 0, sizeof(*p)); | 
 | 1770 |  | 
| Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 1771 | 	pcp = &p->pcp[0];		/* hot */ | 
 | 1772 | 	pcp->count = 0; | 
| Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 1773 | 	pcp->high = 6 * batch; | 
 | 1774 | 	pcp->batch = max(1UL, 1 * batch); | 
 | 1775 | 	INIT_LIST_HEAD(&pcp->list); | 
 | 1776 |  | 
 | 1777 | 	pcp = &p->pcp[1];		/* cold */ | 
 | 1778 | 	pcp->count = 0; | 
| Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 1779 | 	pcp->high = 2 * batch; | 
| Seth, Rohit | e46a5e2 | 2005-10-29 18:15:48 -0700 | [diff] [blame] | 1780 | 	pcp->batch = max(1UL, batch/2); | 
| Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 1781 | 	INIT_LIST_HEAD(&pcp->list); | 
 | 1782 | } | 
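/*
 * Editorial note: continuing the hypothetical batch = 31 from the
 * example above, the hot list gets high = 186, batch = 31 and the
 * cold list gets high = 62, batch = max(1, 15) = 15.
 */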
 | 1783 |  | 
| Rohit Seth | 8ad4b1f | 2006-01-08 01:00:40 -0800 | [diff] [blame] | 1784 | /* | 
 | 1785 |  * setup_pagelist_highmark() sets the high water mark of the hot per-cpu | 
 | 1786 |  * pagelist in pageset p to the value high. | 
 | 1787 |  */ | 
 | 1788 |  | 
 | 1789 | static void setup_pagelist_highmark(struct per_cpu_pageset *p, | 
 | 1790 | 				unsigned long high) | 
 | 1791 | { | 
 | 1792 | 	struct per_cpu_pages *pcp; | 
 | 1793 |  | 
 | 1794 | 	pcp = &p->pcp[0]; /* hot list */ | 
 | 1795 | 	pcp->high = high; | 
 | 1796 | 	pcp->batch = max(1UL, high/4); | 
 | 1797 | 	if ((high/4) > (PAGE_SHIFT * 8)) | 
 | 1798 | 		pcp->batch = PAGE_SHIFT * 8; | 
 | 1799 | } | 
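/*
 * Editorial note -- hypothetical value: high = 1000 gives batch = 250;
 * with 4K pages PAGE_SHIFT * 8 = 96 and 250 > 96, so the batch is
 * clamped down to 96.
 */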

#ifdef CONFIG_NUMA
/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * Some NUMA counter updates may also be caught by the boot pagesets.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do not check if the
 * processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
static struct per_cpu_pageset boot_pageset[NR_CPUS];

/*
 * Dynamically allocate memory for the
 * per cpu pageset array in struct zone.
 */
static int __cpuinit process_zones(int cpu)
{
	struct zone *zone, *dzone;

	for_each_zone(zone) {

		if (!populated_zone(zone))
			continue;

		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
					 GFP_KERNEL, cpu_to_node(cpu));
		if (!zone_pcp(zone, cpu))
			goto bad;

		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));

		if (percpu_pagelist_fraction)
			setup_pagelist_highmark(zone_pcp(zone, cpu),
				(zone->present_pages / percpu_pagelist_fraction));
	}

	return 0;
bad:
	for_each_zone(dzone) {
		if (dzone == zone)
			break;
		kfree(zone_pcp(dzone, cpu));
		zone_pcp(dzone, cpu) = NULL;
	}
	return -ENOMEM;
}
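
/*
 * Commentary on the unwind above (not in the original source): zones
 * are visited in a stable order, so on allocation failure the cleanup
 * loop walks the same sequence and frees every pageset allocated for
 * this cpu, stopping at the zone whose kmalloc_node() failed.
 */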

static inline void free_zone_pagesets(int cpu)
{
	struct zone *zone;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);

		/* Free per_cpu_pageset if it is slab allocated */
		if (pset != &boot_pageset[cpu])
			kfree(pset);
		zone_pcp(zone, cpu) = NULL;
	}
}

static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	int cpu = (long)hcpu;
	int ret = NOTIFY_OK;

	switch (action) {
	case CPU_UP_PREPARE:
		if (process_zones(cpu))
			ret = NOTIFY_BAD;
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		free_zone_pagesets(cpu);
		break;
	default:
		break;
	}
	return ret;
}

static struct notifier_block __cpuinitdata pageset_notifier =
	{ &pageset_cpuup_callback, NULL, 0 };

void __init setup_per_cpu_pageset(void)
{
	int err;

	/*
	 * Initialize per_cpu_pageset for cpu 0.
	 * A cpuup callback will do this for every cpu
	 * as it comes online.
	 */
	err = process_zones(smp_processor_id());
	BUG_ON(err);
	register_cpu_notifier(&pageset_notifier);
}

#endif

static __meminit
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
	int i;
	struct pglist_data *pgdat = zone->zone_pgdat;
	size_t alloc_size;

	/*
	 * The per-page waitqueue mechanism uses hashed waitqueues
	 * per zone.
	 */
	zone->wait_table_hash_nr_entries =
		wait_table_hash_nr_entries(zone_size_pages);
	zone->wait_table_bits =
		wait_table_bits(zone->wait_table_hash_nr_entries);
	alloc_size = zone->wait_table_hash_nr_entries
					* sizeof(wait_queue_head_t);

	if (system_state == SYSTEM_BOOTING) {
		zone->wait_table = (wait_queue_head_t *)
			alloc_bootmem_node(pgdat, alloc_size);
	} else {
		/*
		 * This case means that a zone whose size was 0 gets new memory
		 * via memory hot-add.
		 * But it may be the case that a new node was hot-added.  In
		 * this case vmalloc() will not be able to use this new node's
		 * memory - this wait_table must be initialized to use this new
		 * node itself as well.
		 * To use this new node's memory, further consideration will be
		 * necessary.
		 */
		zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size);
	}
	if (!zone->wait_table)
		return -ENOMEM;

	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
		init_waitqueue_head(zone->wait_table + i);

	return 0;
}
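
/*
 * Rough sizing sketch (commentary, assuming the usual helpers hash
 * about one waitqueue per 256 pages, clamped to a power of two between
 * 4 and 4096 entries): a 1GB zone of 4KB pages (262144 pages) would
 * get a 1024-entry wait table, i.e. 1024 * sizeof(wait_queue_head_t)
 * bytes from bootmem or vmalloc.
 */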

static __meminit void zone_pcp_init(struct zone *zone)
{
	int cpu;
	unsigned long batch = zone_batchsize(zone);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
#ifdef CONFIG_NUMA
		/* Early boot. Slab allocator not functional yet */
		zone_pcp(zone, cpu) = &boot_pageset[cpu];
		setup_pageset(&boot_pageset[cpu], 0);
#else
		setup_pageset(zone_pcp(zone, cpu), batch);
#endif
	}
	if (zone->present_pages)
		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
			zone->name, zone->present_pages, batch);
}

__meminit int init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret;

	ret = zone_wait_table_init(zone, size);
	if (ret)
		return ret;
	pgdat->nr_zones = zone_idx(zone) + 1;

	zone->zone_start_pfn = zone_start_pfn;

	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);

	zone_init_free_lists(pgdat, zone, zone->spanned_pages);

	return 0;
}

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
 * Basic iterator support. Return the first range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns first region regardless of node
 */
static int __init first_active_region_index_in_nid(int nid)
{
	int i;

	for (i = 0; i < nr_nodemap_entries; i++)
		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
			return i;

	return -1;
}

/*
 * Basic iterator support. Return the next active range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns next region regardless of node
 */
static int __init next_active_region_index_in_nid(int index, int nid)
{
	for (index = index + 1; index < nr_nodemap_entries; index++)
		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
			return index;

	return -1;
}

#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 * Architectures may implement their own version but if add_active_range()
 * was used and there are no special requirements, this is a convenient
 * alternative.
 */
int __init early_pfn_to_nid(unsigned long pfn)
{
	int i;

	for (i = 0; i < nr_nodemap_entries; i++) {
		unsigned long start_pfn = early_node_map[i].start_pfn;
		unsigned long end_pfn = early_node_map[i].end_pfn;

		if (start_pfn <= pfn && pfn < end_pfn)
			return early_node_map[i].nid;
	}

	return 0;
}
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */

/* Basic iterator support to walk early_node_map[] */
#define for_each_active_range_index_in_nid(i, nid) \
	for (i = first_active_region_index_in_nid(nid); i != -1; \
				i = next_active_region_index_in_nid(i, nid))

/**
 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed
 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling free_bootmem() manually.
 */
void __init free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn)
{
	int i;

	for_each_active_range_index_in_nid(i, nid) {
		unsigned long size_pages = 0;
		unsigned long end_pfn = early_node_map[i].end_pfn;

		if (early_node_map[i].start_pfn >= max_low_pfn)
			continue;

		if (end_pfn > max_low_pfn)
			end_pfn = max_low_pfn;

		size_pages = end_pfn - early_node_map[i].start_pfn;
		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
				PFN_PHYS(early_node_map[i].start_pfn),
				size_pages << PAGE_SHIFT);
	}
}

/**
 * sparse_memory_present_with_active_regions - Call memory_present for each active range
 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling memory_present() manually.
 */
void __init sparse_memory_present_with_active_regions(int nid)
{
	int i;

	for_each_active_range_index_in_nid(i, nid)
		memory_present(early_node_map[i].nid,
				early_node_map[i].start_pfn,
				early_node_map[i].end_pfn);
}

/**
 * push_node_boundaries - Push node boundaries to at least the requested boundary
 * @nid: The nid of the node to push the boundary for
 * @start_pfn: The start pfn of the node
 * @end_pfn: The end pfn of the node
 *
 * In reserve-based hot-add, mem_map is allocated that is unused until hotadd
 * time. Specifically, on x86_64, SRAT will report ranges that can potentially
 * be hotplugged even though no physical memory exists. This function allows
 * an arch to push out the node boundaries so mem_map is allocated that can
 * be used later.
 */
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
void __init push_node_boundaries(unsigned int nid,
		unsigned long start_pfn, unsigned long end_pfn)
{
	printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
			nid, start_pfn, end_pfn);

	/* Initialise the boundary for this node if necessary */
	if (node_boundary_end_pfn[nid] == 0)
		node_boundary_start_pfn[nid] = -1UL;

	/* Update the boundaries */
	if (node_boundary_start_pfn[nid] > start_pfn)
		node_boundary_start_pfn[nid] = start_pfn;
	if (node_boundary_end_pfn[nid] < end_pfn)
		node_boundary_end_pfn[nid] = end_pfn;
}

/* If necessary, push the node boundary out for reserve hotadd */
static void __init account_node_boundary(unsigned int nid,
		unsigned long *start_pfn, unsigned long *end_pfn)
{
	printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
			nid, *start_pfn, *end_pfn);

	/* Return if boundary information has not been provided */
	if (node_boundary_end_pfn[nid] == 0)
		return;

	/* Check the boundaries and update if necessary */
	if (node_boundary_start_pfn[nid] < *start_pfn)
		*start_pfn = node_boundary_start_pfn[nid];
	if (node_boundary_end_pfn[nid] > *end_pfn)
		*end_pfn = node_boundary_end_pfn[nid];
}
#else
void __init push_node_boundaries(unsigned int nid,
		unsigned long start_pfn, unsigned long end_pfn) {}

static void __init account_node_boundary(unsigned int nid,
		unsigned long *start_pfn, unsigned long *end_pfn) {}
#endif

/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn
 *
 * It returns the start and end page frame of a node based on information
 * provided by an arch calling add_active_range(). If called for a node
 * with no available memory, a warning is printed and the start and end
 * PFNs will be 0.
 */
void __init get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	int i;

	*start_pfn = -1UL;
	*end_pfn = 0;

	for_each_active_range_index_in_nid(i, nid) {
		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
	}

	if (*start_pfn == -1UL) {
		printk(KERN_WARNING "Node %u active with no memory\n", nid);
		*start_pfn = 0;
	}

	/* Push the node boundaries out if requested */
	account_node_boundary(nid, start_pfn, end_pfn);
}

/*
 * Return the number of pages a zone spans in a node, including holes:
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
unsigned long __init zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *ignored)
{
	unsigned long node_start_pfn, node_end_pfn;
	unsigned long zone_start_pfn, zone_end_pfn;

	/* Get the start and end of the node and zone */
	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];

	/* Check that this node has pages within the zone's required range */
	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
	zone_start_pfn = max(zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return zone_end_pfn - zone_start_pfn;
}
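
/*
 * Illustrative example (not from the original source): for a node
 * spanning PFNs 1000-5000 and a zone whose architectural limits are
 * PFNs 0-4096, the intersection is 1000-4096, so the zone spans
 * 3096 pages on that node, holes included.
 */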

/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __init __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	int i;
	unsigned long prev_end_pfn = 0, hole_pages = 0;
	unsigned long start_pfn;

	/* Find the end_pfn of the first active range of pfns in the node */
	i = first_active_region_index_in_nid(nid);
	if (i == -1)
		return 0;

	/* Account for ranges before physical memory on this node */
	if (early_node_map[i].start_pfn > range_start_pfn)
		hole_pages = early_node_map[i].start_pfn - range_start_pfn;

	prev_end_pfn = early_node_map[i].start_pfn;

	/* Find all holes for the zone within the node */
	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {

		/* No need to continue if prev_end_pfn is outside the zone */
		if (prev_end_pfn >= range_end_pfn)
			break;

		/* Make sure the end of the zone is not within the hole */
		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
		prev_end_pfn = max(prev_end_pfn, range_start_pfn);

		/* Update the hole size count and move on */
		if (start_pfn > range_start_pfn) {
			BUG_ON(prev_end_pfn > start_pfn);
			hole_pages += start_pfn - prev_end_pfn;
		}
		prev_end_pfn = early_node_map[i].end_pfn;
	}

	/* Account for ranges past physical memory on this node */
	if (range_end_pfn > prev_end_pfn)
		hole_pages += range_end_pfn -
				max(range_start_pfn, prev_end_pfn);

	return hole_pages;
}
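
/*
 * Worked example (illustrative): for a range of PFNs 0-1000 on a node
 * with active ranges 100-300 and 500-900, the holes are 0-100 (before
 * memory), 300-500 (interior) and 900-1000 (trailing), so the function
 * returns 100 + 200 + 100 = 400 pages.
 */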

/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}

/* Return the number of page frames in holes in a zone on a node */
unsigned long __init zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *ignored)
{
	unsigned long node_start_pfn, node_end_pfn;
	unsigned long zone_start_pfn, zone_end_pfn;

	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
							node_start_pfn);
	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
							node_end_pfn);

	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}

/* Return the zone index a PFN is in */
int memmap_zone_idx(struct page *lmem_map)
{
	int i;
	unsigned long phys_addr = virt_to_phys(lmem_map);
	unsigned long pfn = phys_addr >> PAGE_SHIFT;

	for (i = 0; i < MAX_NR_ZONES; i++)
		if (pfn < arch_zone_highest_possible_pfn[i])
			break;

	return i;
}
#else
static inline unsigned long zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *zones_size)
{
	return zones_size[zone_type];
}

static inline unsigned long zone_absent_pages_in_node(int nid,
						unsigned long zone_type,
						unsigned long *zholes_size)
{
	if (!zholes_size)
		return 0;

	return zholes_size[zone_type];
}

static inline int memmap_zone_idx(struct page *lmem_map)
{
	return MAX_NR_ZONES;
}
#endif

static void __init calculate_node_totalpages(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	unsigned long realtotalpages, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
								zones_size);
	pgdat->node_spanned_pages = totalpages;

	realtotalpages = totalpages;
	for (i = 0; i < MAX_NR_ZONES; i++)
		realtotalpages -=
			zone_absent_pages_in_node(pgdat->node_id, i,
								zholes_size);
	pgdat->node_present_pages = realtotalpages;
	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
							realtotalpages);
}

/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
static void __meminit free_area_init_core(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	enum zone_type j;
	int nid = pgdat->node_id;
	unsigned long zone_start_pfn = pgdat->node_start_pfn;
	int ret;

	pgdat_resize_init(pgdat);
	pgdat->nr_zones = 0;
	init_waitqueue_head(&pgdat->kswapd_wait);
	pgdat->kswapd_max_order = 0;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, realsize, memmap_pages;

		size = zone_spanned_pages_in_node(nid, j, zones_size);
		realsize = size - zone_absent_pages_in_node(nid, j,
								zholes_size);

		/*
		 * Adjust realsize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations.
		 */
		memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
		if (realsize >= memmap_pages) {
			realsize -= memmap_pages;
			printk(KERN_DEBUG
				"  %s zone: %lu pages used for memmap\n",
				zone_names[j], memmap_pages);
		} else
			printk(KERN_WARNING
				"  %s zone: %lu pages exceeds realsize %lu\n",
				zone_names[j], memmap_pages, realsize);

		/* Account for reserved DMA pages */
		if (j == ZONE_DMA && realsize > dma_reserve) {
			realsize -= dma_reserve;
			printk(KERN_DEBUG "  DMA zone: %lu pages reserved\n",
								dma_reserve);
		}

		if (!is_highmem_idx(j))
			nr_kernel_pages += realsize;
		nr_all_pages += realsize;

		zone->spanned_pages = size;
		zone->present_pages = realsize;
#ifdef CONFIG_NUMA
		zone->node = nid;
		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
						/ 100;
		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
#endif
		zone->name = zone_names[j];
		spin_lock_init(&zone->lock);
		spin_lock_init(&zone->lru_lock);
		zone_seqlock_init(zone);
		zone->zone_pgdat = pgdat;
		zone->free_pages = 0;

		zone->temp_priority = zone->prev_priority = DEF_PRIORITY;

		zone_pcp_init(zone);
		INIT_LIST_HEAD(&zone->active_list);
		INIT_LIST_HEAD(&zone->inactive_list);
		zone->nr_scan_active = 0;
		zone->nr_scan_inactive = 0;
		zone->nr_active = 0;
		zone->nr_inactive = 0;
		zap_zone_vm_stats(zone);
		atomic_set(&zone->reclaim_in_progress, 0);
		if (!size)
			continue;

		zonetable_add(zone, nid, j, zone_start_pfn, size);
		ret = init_currently_empty_zone(zone, zone_start_pfn, size);
		BUG_ON(ret);
		zone_start_pfn += size;
	}
}
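
/*
 * Sizing sketch for the memmap adjustment above (illustrative, assuming
 * a 40-byte struct page and 4KB pages): a zone spanning 262144 pages
 * (1GB) needs (262144 * 40) >> 12 = 2560 pages of memmap, so realsize
 * shrinks by 2560 before watermarks and per-cpu batch sizes are derived
 * from it.
 */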

static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

#ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size, start, end;
		struct page *map;

		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
		end = ALIGN(end, MAX_ORDER_NR_PAGES);
		size = (end - start) * sizeof(struct page);
		map = alloc_remap(pgdat->node_id, size);
		if (!map)
			map = alloc_bootmem_node(pgdat, size);
		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
	}
#ifdef CONFIG_FLATMEM
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's.
	 */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
			mem_map -= pgdat->node_start_pfn;
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
	}
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}

void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long node_start_pfn,
		unsigned long *zholes_size)
{
	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;
	calculate_node_totalpages(pgdat, zones_size, zholes_size);

	alloc_node_mem_map(pgdat);

	free_area_init_core(pgdat, zones_size, zholes_size);
}

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/**
 * add_active_range - Register a range of PFNs backed by physical memory
 * @nid: The node ID the range resides on
 * @start_pfn: The start PFN of the available physical memory
 * @end_pfn: The end PFN of the available physical memory
 *
 * These ranges are stored in an early_node_map[] and later used by
 * free_area_init_nodes() to calculate zone sizes and holes. If the
 * range spans a memory hole, it is up to the architecture to ensure
 * the memory is not freed by the bootmem allocator. If possible
 * the range being registered will be merged with existing ranges.
 */
void __init add_active_range(unsigned int nid, unsigned long start_pfn,
						unsigned long end_pfn)
{
	int i;

	printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
			  "%d entries of %d used\n",
			  nid, start_pfn, end_pfn,
			  nr_nodemap_entries, MAX_ACTIVE_REGIONS);

	/* Merge with existing active regions if possible */
	for (i = 0; i < nr_nodemap_entries; i++) {
		if (early_node_map[i].nid != nid)
			continue;

		/* Skip if an existing region covers this new one */
		if (start_pfn >= early_node_map[i].start_pfn &&
				end_pfn <= early_node_map[i].end_pfn)
			return;

		/* Merge forward if suitable */
		if (start_pfn <= early_node_map[i].end_pfn &&
				end_pfn > early_node_map[i].end_pfn) {
			early_node_map[i].end_pfn = end_pfn;
			return;
		}

		/* Merge backward if suitable */
		if (start_pfn < early_node_map[i].end_pfn &&
				end_pfn >= early_node_map[i].start_pfn) {
			early_node_map[i].start_pfn = start_pfn;
			return;
		}
	}

	/* Check that early_node_map is large enough */
	if (i >= MAX_ACTIVE_REGIONS) {
		printk(KERN_CRIT "More than %d memory regions, truncating\n",
							MAX_ACTIVE_REGIONS);
		return;
	}

	early_node_map[i].nid = nid;
	early_node_map[i].start_pfn = start_pfn;
	early_node_map[i].end_pfn = end_pfn;
	nr_nodemap_entries = i + 1;
}
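
/*
 * Worked example of the merge logic (illustrative): after
 * add_active_range(0, 0, 100), a call with (0, 90, 200) merges forward
 * and the entry becomes 0-200; a later call with (0, 50, 80) is fully
 * covered and ignored; (0, 300, 400) overlaps nothing and creates a
 * new entry.
 */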

/**
 * shrink_active_range - Shrink an existing registered range of PFNs
 * @nid: The node id the range is on that should be shrunk
 * @old_end_pfn: The old end PFN of the range
 * @new_end_pfn: The new end PFN of the range
 *
 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
 * The map is kept at the end of the physical page range that has already
 * been registered with add_active_range(). This function allows an arch to
 * shrink an existing registered range.
 */
void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
						unsigned long new_end_pfn)
{
	int i;

	/* Find the old active region end and shrink */
	for_each_active_range_index_in_nid(i, nid)
		if (early_node_map[i].end_pfn == old_end_pfn) {
			early_node_map[i].end_pfn = new_end_pfn;
			break;
		}
}

/**
 * remove_all_active_ranges - Remove all currently registered regions
 *
 * During discovery, it may be found that a table like SRAT is invalid
 * and an alternative discovery method must be used. This function removes
 * all currently registered regions.
 */
void __init remove_all_active_ranges(void)
{
	memset(early_node_map, 0, sizeof(early_node_map));
	nr_nodemap_entries = 0;
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
	memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
	memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
}

/* Compare two active node_active_regions */
static int __init cmp_node_active_region(const void *a, const void *b)
{
	struct node_active_region *arange = (struct node_active_region *)a;
	struct node_active_region *brange = (struct node_active_region *)b;

	/* Done this way to avoid overflows */
	if (arange->start_pfn > brange->start_pfn)
		return 1;
	if (arange->start_pfn < brange->start_pfn)
		return -1;

	return 0;
}

/* sort the node_map by start_pfn */
static void __init sort_node_map(void)
{
	sort(early_node_map, (size_t)nr_nodemap_entries,
			sizeof(struct node_active_region),
			cmp_node_active_region, NULL);
}

/* Find the lowest pfn for a node. This depends on a sorted early_node_map */
unsigned long __init find_min_pfn_for_node(unsigned long nid)
{
	int i;

	/* Assuming a sorted map, the first range found has the starting pfn */
	for_each_active_range_index_in_nid(i, nid)
		return early_node_map[i].start_pfn;

	printk(KERN_WARNING "Could not find start_pfn for node %lu\n", nid);
	return 0;
}

/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * It returns the minimum PFN based on information provided via
 * add_active_range().
 */
unsigned long __init find_min_pfn_with_active_regions(void)
{
	return find_min_pfn_for_node(MAX_NUMNODES);
}

/**
 * find_max_pfn_with_active_regions - Find the maximum PFN registered
 *
 * It returns the maximum PFN based on information provided via
 * add_active_range().
 */
unsigned long __init find_max_pfn_with_active_regions(void)
{
	int i;
	unsigned long max_pfn = 0;

	for (i = 0; i < nr_nodemap_entries; i++)
		max_pfn = max(max_pfn, early_node_map[i].end_pfn);

	return max_pfn;
}

/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone type
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by add_active_range(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFNs of
 * two adjacent zones match, it is assumed that the zone between them is
 * empty. For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is
 * assumed that ZONE_DMA32 has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
	unsigned long nid;
	enum zone_type i;

	/* Record where the zone boundaries are */
	memset(arch_zone_lowest_possible_pfn, 0,
				sizeof(arch_zone_lowest_possible_pfn));
	memset(arch_zone_highest_possible_pfn, 0,
				sizeof(arch_zone_highest_possible_pfn));
	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
	for (i = 1; i < MAX_NR_ZONES; i++) {
		arch_zone_lowest_possible_pfn[i] =
			arch_zone_highest_possible_pfn[i-1];
		arch_zone_highest_possible_pfn[i] =
			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
	}

	/* Regions in the early_node_map can be in any order */
	sort_node_map();

	/* Print out the zone ranges */
	printk("Zone PFN ranges:\n");
	for (i = 0; i < MAX_NR_ZONES; i++)
		printk("  %-8s %8lu -> %8lu\n",
				zone_names[i],
				arch_zone_lowest_possible_pfn[i],
				arch_zone_highest_possible_pfn[i]);

	/* Print out the early_node_map[] */
	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
	for (i = 0; i < nr_nodemap_entries; i++)
		printk("  %3d: %8lu -> %8lu\n", early_node_map[i].nid,
						early_node_map[i].start_pfn,
						early_node_map[i].end_pfn);

	/* Initialise every node */
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		free_area_init_node(nid, pgdat, NULL,
				find_min_pfn_for_node(nid), NULL);
	}
}
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
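
/*
 * Typical arch usage (a sketch, not taken from this file): after
 * registering its e820/SRAT derived ranges with add_active_range(),
 * an architecture fills in the per-zone limits and hands off:
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES];
 *
 *	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 *	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
 *	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
 *	max_zone_pfns[ZONE_NORMAL] = end_pfn;
 *	free_area_init_nodes(max_zone_pfns);
 *
 * MAX_DMA_PFN, MAX_DMA32_PFN and end_pfn here stand in for whatever
 * the architecture actually computes.
 */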

/**
 * set_dma_reserve - Account the specified number of pages reserved in ZONE_DMA
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by present_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
 * function may optionally be used to account for unfreeable pages in
 * ZONE_DMA. The effect will be lower watermarks and smaller per-cpu batchsize.
 */
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
	dma_reserve = new_dma_reserve;
}
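
/*
 * For example (illustrative): an architecture whose kernel image and
 * bootmem bitmap consume much of ZONE_DMA might call
 * set_dma_reserve(unfreeable_dma_pages) from its setup code before the
 * zones are initialised, so the DMA watermarks reflect only pages that
 * can actually be freed. unfreeable_dma_pages is a hypothetical name.
 */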

#ifndef CONFIG_NEED_MULTIPLE_NODES
static bootmem_data_t contig_bootmem_data;
struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };

EXPORT_SYMBOL(contig_page_data);
#endif

void __init free_area_init(unsigned long *zones_size)
{
	free_area_init_node(0, NODE_DATA(0), zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU
static int page_alloc_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD) {
		local_irq_disable();
		__drain_pages(cpu);
		vm_events_fold_cpu(cpu);
		local_irq_enable();
		refresh_cpu_vm_stats(cpu);
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

void __init page_alloc_init(void)
{
	hotcpu_notifier(page_alloc_cpu_notify, 0);
}

/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 *	or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			unsigned long max = 0;

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat pages_high as reserved pages. */
			max += zone->pages_high;

			if (max > zone->present_pages)
				max = zone->present_pages;
			reserve_pages += max;
		}
	}
	totalreserve_pages = reserve_pages;
}

/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type j, idx;

	for_each_online_pgdat(pgdat) {
		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = pgdat->node_zones + j;
			unsigned long present_pages = zone->present_pages;

			zone->lowmem_reserve[j] = 0;

			idx = j;
			while (idx) {
				struct zone *lower_zone;

				idx--;

				if (sysctl_lowmem_reserve_ratio[idx] < 1)
					sysctl_lowmem_reserve_ratio[idx] = 1;

				lower_zone = pgdat->node_zones + idx;
				lower_zone->lowmem_reserve[j] = present_pages /
					sysctl_lowmem_reserve_ratio[idx];
				present_pages += lower_zone->present_pages;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
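
/*
 * Worked example (illustrative, assuming a DMA reserve ratio of 256):
 * on a node with a 1GB Normal zone (262144 pages of 4KB), the DMA zone
 * gets lowmem_reserve[ZONE_NORMAL] = 262144 / 256 = 1024 pages, i.e. a
 * Normal-capable allocation must leave at least that many extra free
 * pages in ZONE_DMA before falling back to it.
 */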
 | 2845 |  | 
 | 2846 | /* | 
 | 2847 |  * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures  | 
 | 2848 |  *	that the pages_{min,low,high} values for each zone are set correctly  | 
 | 2849 |  *	with respect to min_free_kbytes. | 
 | 2850 |  */ | 
| Dave Hansen | 3947be1 | 2005-10-29 18:16:54 -0700 | [diff] [blame] | 2851 | void setup_per_zone_pages_min(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2852 | { | 
 | 2853 | 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); | 
 | 2854 | 	unsigned long lowmem_pages = 0; | 
 | 2855 | 	struct zone *zone; | 
 | 2856 | 	unsigned long flags; | 
 | 2857 |  | 
 | 2858 | 	/* Calculate total number of !ZONE_HIGHMEM pages */ | 
 | 2859 | 	for_each_zone(zone) { | 
 | 2860 | 		if (!is_highmem(zone)) | 
 | 2861 | 			lowmem_pages += zone->present_pages; | 
 | 2862 | 	} | 
 | 2863 |  | 
 | 2864 | 	for_each_zone(zone) { | 
| Andrew Morton | ac924c6 | 2006-05-15 09:43:59 -0700 | [diff] [blame] | 2865 | 		u64 tmp; | 
 | 2866 |  | 
		spin_lock_irqsave(&zone->lru_lock, flags);
		tmp = (u64)pages_min * zone->present_pages;
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The (pages_high - pages_low) and (pages_low - pages_min)
			 * deltas control asynchronous page reclaim, and so they
			 * should not be capped for highmem.
			 */
			int min_pages;

			min_pages = zone->present_pages / 1024;
			if (min_pages < SWAP_CLUSTER_MAX)
				min_pages = SWAP_CLUSTER_MAX;
			if (min_pages > 128)
				min_pages = 128;
			zone->pages_min = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->pages_min = tmp;
		}

		zone->pages_low   = zone->pages_min + (tmp >> 2);
		zone->pages_high  = zone->pages_min + (tmp >> 1);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
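
/*
 * Worked example (hypothetical numbers): with min_free_kbytes = 4096 and
 * 4K pages, pages_min totals 4096 >> 2 = 1024 pages, split between lowmem
 * zones in proportion to their size.  A lowmem zone holding half of all
 * lowmem gets tmp = 512, hence pages_min = 512,
 * pages_low = 512 + (512 >> 2) = 640 and pages_high = 512 + (512 >> 1) = 768.
 */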

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
static int __init init_per_zone_pages_min(void)
{
	unsigned long lowmem_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);

	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
	if (min_free_kbytes < 128)
		min_free_kbytes = 128;
	if (min_free_kbytes > 65536)
		min_free_kbytes = 65536;
	setup_per_zone_pages_min();
	setup_per_zone_lowmem_reserve();
	return 0;
}
module_init(init_per_zone_pages_min)

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call setup_per_zone_pages_min() whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	setup_per_zone_pages_min();
	return 0;
}
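
/*
 * Usage sketch (value illustrative): the watermarks can be retuned from
 * user space at run time, e.g.
 *
 *	echo 16384 > /proc/sys/vm/min_free_kbytes
 *
 * which reruns setup_per_zone_pages_min() through the handler above.
 */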

#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_unmapped_pages = (zone->present_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_slab_pages = (zone->present_pages *
				sysctl_min_slab_ratio) / 100;
	return 0;
}
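
/*
 * Worked example (hypothetical zone size): with min_unmapped_ratio or
 * min_slab_ratio set to 1, a zone of 262144 pages (1GB at 4K pages) gets
 * a threshold of 262144 * 1 / 100 = 2621 pages.
 */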
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the pages_min watermarks; it is
 * only meaningful relative to the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}
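
/*
 * Usage sketch (values illustrative): the sysctl takes one ratio per
 * lower zone, lowest first, e.g. on a machine with DMA, NORMAL and
 * HIGHMEM zones
 *
 *	echo 256 256 32 > /proc/sys/vm/lowmem_reserve_ratio
 *
 * (256/256/32 being typical defaults of this era); larger ratios shrink
 * the protected reserve.
 */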

/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
 * pagelist can hold before it gets flushed back to the buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	unsigned int cpu;
	int ret;

	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (!write || (ret == -EINVAL))
		return ret;
	for_each_zone(zone) {
		for_each_online_cpu(cpu) {
			unsigned long high;
			high = zone->present_pages / percpu_pagelist_fraction;
			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
		}
	}
	return 0;
}
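
/*
 * Worked example (hypothetical zone size): writing 8 to
 * /proc/sys/vm/percpu_pagelist_fraction on a zone of 262144 pages sets
 * pcp->high to 262144 / 8 = 32768 pages for every online cpu's pagelist.
 */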

int hashdist = HASHDIST_DEFAULT;

#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif
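
/*
 * Usage sketch: on NUMA kernels the boot parameter "hashdist=1" asks for
 * the large system hashes below to be vmalloc'ed (and hence spread across
 * nodes), while "hashdist=0" keeps them in linearly mapped memory.
 */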

/*
 * allocate a large system hash table
 * - early (HASH_EARLY) callers get bootmem; later ones use vmalloc or the
 *   page allocator
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long limit)
{
	unsigned long long max = limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
		numentries >>= 20 - PAGE_SHIFT;
		numentries <<= 20 - PAGE_SHIFT;

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}

	if (numentries > max)
		numentries = max;

	log2qty = long_log2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = alloc_bootmem(size);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			unsigned long order;
			for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
				;
			table = (void *)__get_free_pages(GFP_ATOMIC, order);
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
	       tablename,
	       (1U << log2qty),
	       long_log2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
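
/*
 * Caller sketch (names from fs/inode.c of this era, shown for
 * illustration): the inode cache sets up its table roughly as
 *
 *	inode_hashtable = alloc_large_system_hash("Inode-cache",
 *					sizeof(struct hlist_head),
 *					ihash_entries, 14, HASH_EARLY,
 *					&i_hash_shift, &i_hash_mask, 0);
 *
 * i.e. one bucket per 2^14 bytes of low memory unless ihash_entries was
 * given on the command line.
 */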

#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
struct page *pfn_to_page(unsigned long pfn)
{
	return __pfn_to_page(pfn);
}
unsigned long page_to_pfn(struct page *page)
{
	return __page_to_pfn(page);
}
EXPORT_SYMBOL(pfn_to_page);
EXPORT_SYMBOL(page_to_pfn);
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */